/*
 * Attach the dc(4) internal (21143-style SIA/SYM) pseudo-PHY.
 *
 * Registers the PHY with the MII layer, clears the controller's 10baseT
 * status/control CSRs, and derives the advertised media capabilities from
 * the bridge card's PCI subsystem IDs and the controller's port mode.
 * Returns 0 on success.
 */
static int
dcphy_attach(device_t dev)
{
	struct mii_softc *sc;
	struct dc_softc *dc_sc;
	device_t brdev;

	sc = device_get_softc(dev);

	/* No isolation and no manual pause support for this PHY. */
	mii_phy_dev_attach(dev, MIIF_NOISOLATE | MIIF_NOMANPAUSE,
	    &dcphy_funcs, 0);

	/*PHY_RESET(sc);*/
	/* Reach back into the parent dc(4) softc to clear the SIA CSRs. */
	dc_sc = sc->mii_pdata->mii_ifp->if_softc;
	CSR_WRITE_4(dc_sc, DC_10BTSTAT, 0);
	CSR_WRITE_4(dc_sc, DC_10BTCTRL, 0);

	/* Key capability quirks off the card's subsystem device/vendor IDs. */
	brdev = device_get_parent(sc->mii_dev);
	switch (pci_get_subdevice(brdev) << 16 | pci_get_subvendor(brdev)) {
	case COMPAQ_PRESARIO_ID:
		/* Example of how to only allow 10Mbps modes. */
		sc->mii_capabilities = BMSR_ANEG | BMSR_10TFDX | BMSR_10THDX;
		break;
	default:
		/* SIA mode is 10Mbps-only; otherwise offer 100TX as well. */
		if (dc_sc->dc_pmode == DC_PMODE_SIA)
			sc->mii_capabilities =
			    BMSR_ANEG | BMSR_10TFDX | BMSR_10THDX;
		else
			sc->mii_capabilities =
			    BMSR_ANEG | BMSR_100TXFDX | BMSR_100TXHDX |
			    BMSR_10TFDX | BMSR_10THDX;
		break;
	}

	/* Honor any capability restrictions imposed on us. */
	sc->mii_capabilities &= sc->mii_capmask;
	device_printf(dev, " ");
	mii_phy_add_media(sc);
	printf("\n");
	MIIBUS_MEDIAINIT(sc->mii_dev);
	return (0);
}
/*
 * Bus ivar read method: answer instance-variable queries from our child.
 *
 * Only the single attached VirtIO child device may query us, and the only
 * supported ivar is VIRTIO_IVAR_DEVTYPE, which is taken from the PCI
 * subsystem device ID.  Returns 0 on success or ENOENT otherwise.
 */
static int
vtpci_read_ivar(device_t dev, device_t child, int index, uintptr_t *result)
{
	struct vtpci_softc *vsc;

	vsc = device_get_softc(dev);

	/* Reject queries from anything but our one child device. */
	if (vsc->vtpci_child_dev != child)
		return (ENOENT);

	if (index != VIRTIO_IVAR_DEVTYPE)
		return (ENOENT);

	*result = pci_get_subdevice(dev);
	return (0);
}
/*
 * Scan the mps_identifiers table for an entry matching this PCI device.
 *
 * Vendor and device IDs must match exactly; a subvendor or subdevice of
 * 0xffff in a table entry acts as a wildcard.  Returns the matching entry
 * or NULL if the device is not recognized.
 */
static struct mps_ident *
mps_find_ident(device_t dev)
{
	struct mps_ident *entry;
	uint16_t vendor, device, subvendor, subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	for (entry = mps_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor == vendor &&
		    entry->device == device &&
		    (entry->subvendor == 0xffff ||
		     entry->subvendor == subvendor) &&
		    (entry->subdevice == 0xffff ||
		     entry->subdevice == subdevice))
			return (entry);
	}
	return (NULL);
}
/*
 * Match a PCI UART against the given id table.
 *
 * The table is assumed to group entries for the same vendor/device pair
 * contiguously and is terminated by a vendor of 0xffff.  An entry whose
 * subven is 0xffff matches any subsystem IDs; otherwise the run of entries
 * for the pair is searched for an exact subsystem match.  Returns the
 * matching entry or NULL.
 */
const static struct pci_id *
uart_pci_match(device_t dev, const struct pci_id *id)
{
	uint16_t device, subdev, subven, vendor;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	/* Locate the first table entry for this vendor/device pair. */
	for (; id->vendor != 0xffff; id++)
		if (id->vendor == vendor && id->device == device)
			break;
	if (id->vendor == 0xffff)
		return (NULL);

	/* A wildcard subvendor accepts any subsystem IDs. */
	if (id->subven == 0xffff)
		return (id);

	subven = pci_get_subvendor(dev);
	subdev = pci_get_subdevice(dev);

	/* Search the contiguous run for an exact subsystem match. */
	for (; id->vendor == vendor && id->device == device; id++)
		if (id->subven == subven && id->subdev == subdev)
			return (id);
	return (NULL);
}
/*
 * Look up the per-subcard quirk entry for this device.
 *
 * Finds the card type first; if unknown, a generic "nocard" entry is
 * returned.  Otherwise the card's subcard list (terminated by an entry
 * with subvendor == 0) is searched for this device's subsystem IDs.  On
 * no match the terminator entry itself is returned, so the result is
 * never NULL.
 */
struct csa_card *
csa_findsubcard(device_t dev)
{
	struct card_type *card;
	struct csa_card *sub;
	uint16_t subvendor, subdevice;

	card = csa_findcard(dev);
	if (card == NULL)
		return (&nocard);

	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	for (sub = card->cards; sub->subvendor != 0; sub++) {
		if (sub->subvendor == subvendor &&
		    sub->subdevice == subdevice)
			break;
	}
	/* Either the matching entry or the list terminator (default). */
	return (sub);
}
/*
 * PCI probe routine for the ixl(4) driver.
 *
 * Rejects non-Intel devices immediately, then scans the vendor info
 * table (terminated by vendor_id == 0) for a vendor/device match; a
 * subvendor or subdevice of 0 in a table entry acts as a wildcard.  On
 * a match the device description is set and BUS_PROBE_DEFAULT returned;
 * otherwise ENXIO.
 */
static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;
	u16 pci_vendor_id, pci_device_id;
	u16 pci_subvendor_id, pci_subdevice_id;
	char device_name[256];

#if 0
	INIT_DEBUGOUT("ixl_probe: begin");
#endif
	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			/*
			 * Bound the write: the description and version
			 * strings are not under our control here, so
			 * avoid an unchecked sprintf into the 256-byte
			 * stack buffer.
			 */
			snprintf(device_name, sizeof(device_name),
			    "%s, Version - %s",
			    ixl_strings[ent->index],
			    ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*
 * Identify an Adaptec RAID adapter by its PCI IDs.
 *
 * First attempts an exact match (vendor, device, subvendor, subdevice)
 * in aac_identifiers, then falls back to a vendor/device-only family
 * match in aac_family_identifiers.  Both tables are terminated by an
 * entry with vendor == 0.  Returns the matching entry or NULL.
 */
static const struct aac_ident *
aac_find_ident(device_t dev)
{
	const struct aac_ident *entry;
	u_int16_t vendor, device, subvendor, subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	/* Pass 1: exact match including subsystem IDs. */
	for (entry = aac_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor == vendor && entry->device == device &&
		    entry->subvendor == subvendor &&
		    entry->subdevice == subdevice)
			return (entry);
	}

	/* Pass 2: family match on vendor/device only. */
	for (entry = aac_family_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor == vendor && entry->device == device)
			return (entry);
	}
	return (NULL);
}
/*
 * Attempt to find a match for the given device in
 * the given device table.
 *
 * Each populated table entry carries match_* flags selecting which of
 * the four PCI IDs (vendor, device, subvendor, subdevice) must compare
 * equal; unflagged fields are wildcards.
 *
 * Returns the device structure or NULL if no matching
 * PCI device is found.
 */
static const struct pci_device_id *
ath_pci_probe_device(device_t dev, const struct pci_device_id *dev_table,
    int nentries)
{
	const struct pci_device_id *ent;
	int i;
	int vendor_id, device_id;
	int sub_vendor_id, sub_device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);
	sub_vendor_id = pci_get_subvendor(dev);
	sub_device_id = pci_get_subdevice(dev);

	for (i = 0; i < nentries; i++) {
		ent = &dev_table[i];

		/* Don't match on non-populated (eg empty) entries */
		if (!ent->match_populated)
			continue;

		/* Each flagged field must match; the rest are wildcards. */
		if (ent->match_vendor_id && ent->vendor_id != vendor_id)
			continue;
		if (ent->match_device_id && ent->device_id != device_id)
			continue;
		if (ent->match_sub_vendor_id &&
		    ent->sub_vendor_id != sub_vendor_id)
			continue;
		if (ent->match_sub_device_id &&
		    ent->sub_device_id != sub_device_id)
			continue;

		return (ent);
	}
	return (NULL);
}
/*
 * Attach the siba(4) bus behind a Broadcom wireless PCI device.
 *
 * Enables bus mastering, maps the BAR register window, records the PCI
 * identity in the siba softc, and hands off to the common core attach.
 * Returns 0 on success or ENXIO if the register window cannot be mapped.
 */
static int
siba_bwn_attach(device_t dev)
{
	struct siba_bwn_softc *ssc = device_get_softc(dev);
	struct siba_softc *siba = &ssc->ssc_siba;

	siba->siba_dev = dev;
	siba->siba_type = SIBA_TYPE_PCI;

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	siba->siba_mem_rid = SIBA_PCIR_BAR;
	siba->siba_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &siba->siba_mem_rid, RF_ACTIVE);
	if (siba->siba_mem_res == NULL) {
		device_printf(dev, "cannot map register space\n");
		return (ENXIO);
	}
	siba->siba_mem_bt = rman_get_bustag(siba->siba_mem_res);
	siba->siba_mem_bh = rman_get_bushandle(siba->siba_mem_res);

	/* Get more PCI information */
	siba->siba_pci_did = pci_get_device(dev);
	siba->siba_pci_vid = pci_get_vendor(dev);
	siba->siba_pci_subvid = pci_get_subvendor(dev);
	siba->siba_pci_subdid = pci_get_subdevice(dev);
	siba->siba_pci_revid = pci_get_revid(dev);

	/* Remaining initialization is shared with other bus frontends. */
	return (siba_core_attach(siba));
}
/*
 * CAM SIM action entry point for ata(4) channels.
 *
 * Dispatches on the CCB function code: starts ATA/SCSI I/O transactions,
 * gets/sets transport settings for the addressed target, answers path
 * inquiries, and performs bus/device resets.  CCBs that are handed to
 * ata_cam_begin_transaction() (and those rejected by ata_check_ids())
 * return early; every other path falls through to xpt_done().
 */
static void
ataaction(struct cam_sim *sim, union ccb *ccb)
{
	device_t dev, parent;
	struct ata_channel *ch;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("ataaction func_code=%x\n", ccb->ccb_h.func_code));

	ch = (struct ata_channel *)cam_sim_softc(sim);
	dev = ch->dev;
	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ATA_IO:	/* Execute the requested I/O operation */
	case XPT_SCSI_IO:
		if (ata_check_ids(dev, ccb))
			return;
		/* Fail selection if no device is present at this target. */
		if ((ch->devices &
		    ((ATA_ATA_MASTER | ATA_ATAPI_MASTER) <<
		    ccb->ccb_h.target_id)) == 0) {
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}
		if (ch->running)
			device_printf(dev, "already running!\n");
		/*
		 * A control CCB carrying ATA_A_RESET is completed here by
		 * synthesizing the post-reset signature (ATA vs ATAPI)
		 * instead of being sent to the hardware.
		 */
		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
			struct ata_res *res = &ccb->ataio.res;

			bzero(res, sizeof(*res));
			if (ch->devices &
			    (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
				res->lba_high = 0;
				res->lba_mid = 0;
			} else {
				/* ATAPI signature bytes. */
				res->lba_high = 0xeb;
				res->lba_mid = 0x14;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		/* Completion is reported asynchronously; don't xpt_done(). */
		ata_cam_begin_transaction(dev, ccb);
		return;
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_TARGET_IO:		/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		/* Current settings go to curr[]; user requests to user[]. */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		if (ch->flags & ATA_SATA) {
			if (cts->xport_specific.sata.valid &
			    CTS_SATA_VALID_REVISION)
				d->revision =
				    cts->xport_specific.sata.revision;
			if (cts->xport_specific.sata.valid &
			    CTS_SATA_VALID_MODE) {
				/*
				 * Only current-settings changes touch the
				 * hardware via ATA_SETMODE.
				 */
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.sata.mode);
				} else
					d->mode =
					    cts->xport_specific.sata.mode;
			}
			if (cts->xport_specific.sata.valid &
			    CTS_SATA_VALID_BYTECOUNT)
				d->bytecount = min(8192,
				    cts->xport_specific.sata.bytecount);
			if (cts->xport_specific.sata.valid &
			    CTS_SATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.sata.atapi;
			if (cts->xport_specific.sata.valid &
			    CTS_SATA_VALID_CAPS)
				d->caps = cts->xport_specific.sata.caps;
		} else {
			if (cts->xport_specific.ata.valid &
			    CTS_ATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.ata.mode);
				} else
					d->mode =
					    cts->xport_specific.ata.mode;
			}
			if (cts->xport_specific.ata.valid &
			    CTS_ATA_VALID_BYTECOUNT)
				d->bytecount =
				    cts->xport_specific.ata.bytecount;
			if (cts->xport_specific.ata.valid &
			    CTS_ATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.ata.atapi;
			if (cts->xport_specific.ata.valid &
			    CTS_ATA_VALID_CAPS)
				d->caps = cts->xport_specific.ata.caps;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		cts->protocol = PROTO_UNSPECIFIED;
		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
		if (ch->flags & ATA_SATA) {
			cts->transport = XPORT_SATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.sata.valid = 0;
			cts->xport_specific.sata.mode = d->mode;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
			cts->xport_specific.sata.bytecount = d->bytecount;
			cts->xport_specific.sata.valid |=
			    CTS_SATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				/* Query live revision; 0xff means unknown. */
				cts->xport_specific.sata.revision =
				    ATA_GETREV(dev, ccb->ccb_h.target_id);
				if (cts->xport_specific.sata.revision !=
				    0xff) {
					cts->xport_specific.sata.valid |=
					    CTS_SATA_VALID_REVISION;
				}
				cts->xport_specific.sata.caps =
				    d->caps & CTS_SATA_CAPS_D;
				if (ch->pm_level) {
					cts->xport_specific.sata.caps |=
					    CTS_SATA_CAPS_H_PMREQ;
				}
				/* Never report more than the user allows. */
				cts->xport_specific.sata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else {
				cts->xport_specific.sata.revision =
				    d->revision;
				cts->xport_specific.sata.valid |=
				    CTS_SATA_VALID_REVISION;
				cts->xport_specific.sata.caps = d->caps;
			}
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
			cts->xport_specific.sata.atapi = d->atapi;
			cts->xport_specific.sata.valid |=
			    CTS_SATA_VALID_ATAPI;
		} else {
			cts->transport = XPORT_ATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.ata.valid = 0;
			cts->xport_specific.ata.mode = d->mode;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
			cts->xport_specific.ata.bytecount = d->bytecount;
			cts->xport_specific.ata.valid |=
			    CTS_ATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				cts->xport_specific.ata.caps =
				    d->caps & CTS_ATA_CAPS_D;
				if (!(ch->flags & ATA_NO_48BIT_DMA))
					cts->xport_specific.ata.caps |=
					    CTS_ATA_CAPS_H_DMA48;
				cts->xport_specific.ata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else
				cts->xport_specific.ata.caps = d->caps;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS;
			cts->xport_specific.ata.atapi = d->atapi;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		ata_reinit(dev);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		parent = device_get_parent(dev);
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		if (ch->flags & ATA_NO_SLAVE)
			cpi->max_target = 0;
		else
			cpi->max_target = 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		if (ch->flags & ATA_SATA)
			cpi->base_transfer_speed = 150000;
		else
			cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		if (ch->flags & ATA_SATA)
			cpi->transport = XPORT_SATA;
		else
			cpi->transport = XPORT_ATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->maxio = ch->dma.max_iosize ?
		    ch->dma.max_iosize : DFLTPHYS;
		/* Report the PCI IDs only when the grandparent is a PCI bus. */
		if (device_get_devclass(device_get_parent(parent)) ==
		    devclass_find("pci")) {
			cpi->hba_vendor = pci_get_vendor(parent);
			cpi->hba_device = pci_get_device(parent);
			cpi->hba_subvendor = pci_get_subvendor(parent);
			cpi->hba_subdevice = pci_get_subdevice(parent);
		}
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
/*
 * Attach a HighPoint RAID adapter.
 *
 * Collects the PCI identity into a vendor PCI_ID structure, allocates the
 * HIM adapter handle, creates the adapter through the vendor HIM layer,
 * registers it with the LDM, creating a new virtual bus if needed, and
 * finally links the HBA onto its vbus's list.  Returns 0 on success,
 * ENXIO/-1 on failure.
 */
static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him = hba->ldm_adapter.him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	KdPrint(("hpt_attach(%d/%d/%d)",
	    pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

#if __FreeBSD_version >=440000
	pci_enable_busmaster(dev);
#endif

	/* Vendor layer packs subsys as (subdevice << 16) | subvendor. */
	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 |
	    pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	hba->ldm_adapter.him_handle = malloc(size, M_DEVBUF, M_WAITOK);
	/* NOTE(review): M_WAITOK malloc does not return NULL in FreeBSD,
	 * so this check looks dead — confirm before relying on it. */
	if (!hba->ldm_adapter.him_handle)
		return ENXIO;

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr,
	    hba->ldm_adapter.him_handle, hba)) {
		free(hba->ldm_adapter.him_handle, M_DEVBUF);
		return -1;
	}

	os_printk("adapter at PCI %d:%d:%d, IRQ %d",
	    hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function,
	    pci_get_irq(dev));

	/*
	 * If registration fails the adapter needs a new virtual bus:
	 * allocate it, create it, and register again.
	 */
	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = malloc(sizeof(VBUS_EXT) + size, M_DEVBUF,
		    M_WAITOK);
		if (!vbus_ext) {
			free(hba->ldm_adapter.him_handle, M_DEVBUF);
			return -1;
		}
		memset(vbus_ext, 0, sizeof(VBUS_EXT));
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		ldm_register_adapter(&hba->ldm_adapter);
	}

	/* Link this HBA onto the list of the vbus it was registered on. */
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}
/*
 * Attach an Intel/ICP RAID (iir) PCI controller.
 *
 * Maps the DPMEM window and IRQ, performs the MPR-class controller
 * handshake (deinitialize, protocol-version check, BIOS special command),
 * installs the per-class method pointers, creates the parent DMA tag,
 * runs the common init, registers with CAM, and hooks the interrupt.
 * Returns 0 on success or an errno on failure, releasing any acquired
 * resources on the error path.
 */
static int
iir_pci_attach(device_t dev)
{
	struct gdt_softc *gdt;
	struct resource *irq = NULL;
	int retries, rid, error = 0;
	void *ih;
	u_int8_t protocol;

	gdt = device_get_softc(dev);
	mtx_init(&gdt->sc_lock, "iir", NULL, MTX_DEF);

	/* map DPMEM */
	rid = PCI_DPMEM;
	gdt->sc_dpmem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (gdt->sc_dpmem == NULL) {
		device_printf(dev, "can't allocate register resources\n");
		error = ENOMEM;
		goto err;
	}

	/* get IRQ (note: this reuses 'rid', see the error path below) */
	rid = 0;
	irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (irq == NULL) {
		device_printf(dev, "can't find IRQ value\n");
		error = ENOMEM;
		goto err;
	}

	gdt->sc_devnode = dev;
	gdt->sc_init_level = 0;
	gdt->sc_hanum = device_get_unit(dev);
	gdt->sc_bus = pci_get_bus(dev);
	gdt->sc_slot = pci_get_slot(dev);
	gdt->sc_vendor = pci_get_vendor(dev);
	gdt->sc_device = pci_get_device(dev);
	gdt->sc_subdevice = pci_get_subdevice(dev);
	gdt->sc_class = GDT_MPR;
/* no FC ctr.
	if (gdt->sc_device >= GDT_PCI_PRODUCT_FC)
		gdt->sc_class |= GDT_FC;
*/

	/* initialize RP controller */
	/* check and reset interface area */
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC, htole32(GDT_MPR_MAGIC));
	if (bus_read_4(gdt->sc_dpmem, GDT_MPR_IC) != htole32(GDT_MPR_MAGIC)) {
		device_printf(dev,
		    "cannot access DPMEM at 0x%lx (shadowed?)\n",
		    rman_get_start(gdt->sc_dpmem));
		error = ENXIO;
		goto err;
	}
	bus_set_region_4(gdt->sc_dpmem, GDT_I960_SZ, htole32(0),
	    GDT_MPR_SZ >> 2);

	/* Disable everything */
	bus_write_1(gdt->sc_dpmem, GDT_EDOOR_EN,
	    bus_read_1(gdt->sc_dpmem, GDT_EDOOR_EN) | 4);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_EDOOR, 0xff);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_CMD_INDEX, 0);

	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO,
	    htole32(rman_get_start(gdt->sc_dpmem)));
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_CMD_INDX, 0xff);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_LDOOR, 1);

	DELAY(20);
	retries = GDT_RETRIES;
	/* Wait for the controller to acknowledge the DEINIT request. */
	while (bus_read_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS) != 0xff) {
		if (--retries == 0) {
			device_printf(dev, "DEINIT failed\n");
			error = ENXIO;
			goto err;
		}
		DELAY(1);
	}

	protocol = (uint8_t)le32toh(bus_read_4(gdt->sc_dpmem,
	    GDT_MPR_IC + GDT_S_INFO));
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0);
	if (protocol != GDT_PROTOCOL_VERSION) {
		device_printf(dev, "unsupported protocol %d\n", protocol);
		error = ENXIO;
		goto err;
	}

	/* special command to controller BIOS */
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO, htole32(0));
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO +
	    sizeof (u_int32_t), htole32(0));
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO +
	    2 * sizeof (u_int32_t), htole32(1));
	bus_write_4(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_INFO +
	    3 * sizeof (u_int32_t), htole32(0));
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_CMD_INDX, 0xfe);
	bus_write_1(gdt->sc_dpmem, GDT_MPR_LDOOR, 1);

	DELAY(20);
	retries = GDT_RETRIES;
	while (bus_read_1(gdt->sc_dpmem,
	    GDT_MPR_IC + GDT_S_STATUS) != 0xfe) {
		if (--retries == 0) {
			device_printf(dev, "initialization error\n");
			error = ENXIO;
			goto err;
		}
		DELAY(1);
	}
	bus_write_1(gdt->sc_dpmem, GDT_MPR_IC + GDT_S_STATUS, 0);

	gdt->sc_ic_all_size = GDT_MPR_SZ;
	gdt->sc_copy_cmd = gdt_mpr_copy_cmd;
	gdt->sc_get_status = gdt_mpr_get_status;
	gdt->sc_intr = gdt_mpr_intr;
	gdt->sc_release_event = gdt_mpr_release_event;
	gdt->sc_set_sema0 = gdt_mpr_set_sema0;
	gdt->sc_test_busy = gdt_mpr_test_busy;

	/* Allocate a dmatag representing the capabilities of this attachment */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev),
	    /*alignment*/1, /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/BUS_SPACE_MAXSIZE_32BIT,
	    /*nsegments*/BUS_SPACE_UNRESTRICTED,
	    /*maxsegsz*/BUS_SPACE_MAXSIZE_32BIT,
	    /*flags*/0, /*lockfunc*/busdma_lock_mutex,
	    /*lockarg*/&gdt->sc_lock,
	    &gdt->sc_parent_dmat) != 0) {
		error = ENXIO;
		goto err;
	}
	gdt->sc_init_level++;

	if (iir_init(gdt) != 0) {
		iir_free(gdt);
		error = ENXIO;
		goto err;
	}

	/* Register with the XPT */
	iir_attach(gdt);

	/* associate interrupt handler */
	if (bus_setup_intr(dev, irq, INTR_TYPE_CAM | INTR_MPSAFE,
	    NULL, iir_intr, gdt, &ih )) {
		device_printf(dev, "Unable to register interrupt handler\n");
		error = ENXIO;
		goto err;
	}

	gdt_pci_enable_intr(gdt);
	return (0);

err:
	if (irq)
		bus_release_resource( dev, SYS_RES_IRQ, 0, irq );
	/*
	 * 'rid' was reused for the IRQ allocation above, so it no longer
	 * holds PCI_DPMEM here; release the memory resource by its real
	 * rid rather than the stale variable.
	 */
	if (gdt->sc_dpmem)
		bus_release_resource(dev, SYS_RES_MEMORY, PCI_DPMEM,
		    gdt->sc_dpmem);
	mtx_destroy(&gdt->sc_lock);
	return (error);
}
/*
 * generic PCI ATA device probe
 *
 * Accepts only PCI storage-class devices.  AHCI-capable SATA devices are
 * claimed first; otherwise the vendor-specific ident routines are tried;
 * finally, plain IDE-subclass devices fall back to the generic driver
 * unless blacklisted in none_atapci_table.  Each ident routine returns 0
 * when it claims the device.
 */
int
ata_pci_probe(device_t dev)
{
	if (resource_disabled("atapci", device_get_unit(dev)))
		return (ENXIO);
	if (pci_get_class(dev) != PCIC_STORAGE)
		return ENXIO;

	/* if this is an AHCI chipset grab it */
	if (pci_get_subclass(dev) == PCIS_STORAGE_SATA) {
		if (!ata_ahci_ident(dev))
			return ATA_PROBE_OK;
	}

	/* run through the vendor specific drivers */
	switch (pci_get_vendor(dev)) {
	case ATA_ACARD_ID:
		if (!ata_acard_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_ACER_LABS_ID:
		if (!ata_ali_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_ADAPTEC_ID:
		if (!ata_adaptec_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_AMD_ID:
		if (!ata_amd_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_ATI_ID:
		if (!ata_ati_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_CYRIX_ID:
		if (!ata_cyrix_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_CYPRESS_ID:
		if (!ata_cypress_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_HIGHPOINT_ID:
		if (!ata_highpoint_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_INTEL_ID:
		if (!ata_intel_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_ITE_ID:
		if (!ata_ite_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_JMICRON_ID:
		if (!ata_jmicron_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_MARVELL_ID:
		if (!ata_marvell_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_NATIONAL_ID:
		if (!ata_national_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_NETCELL_ID:
		if (!ata_netcell_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_NVIDIA_ID:
		if (!ata_nvidia_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_PROMISE_ID:
		if (!ata_promise_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_SERVERWORKS_ID:
		if (!ata_serverworks_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_SILICON_IMAGE_ID:
		if (!ata_sii_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_SIS_ID:
		if (!ata_sis_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_VIA_ID:
		if (!ata_via_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_CENATEK_ID:
		if (!ata_cenatek_ident(dev))
			return ATA_PROBE_OK;
		break;
	case ATA_MICRON_ID:
		if (!ata_micron_ident(dev))
			return ATA_PROBE_OK;
		break;
	}

	/* unknown chipset, try generic AHCI or DMA if it seems possible */
	if (pci_get_subclass(dev) == PCIS_STORAGE_IDE) {
		uint16_t vendor, device, subvendor, subdevice;
		const struct none_atapci *e;

		vendor = pci_get_vendor(dev);
		device = pci_get_device(dev);
		subvendor = pci_get_subvendor(dev);
		subdevice = pci_get_subdevice(dev);
		/* Devices on this list must not be claimed generically. */
		for (e = none_atapci_table; e->vendor != 0xffff; ++e) {
			if (e->vendor == vendor && e->device == device &&
			    e->subvendor == subvendor &&
			    e->subdevice == subdevice)
				return ENXIO;
		}
		if (!ata_generic_ident(dev))
			return ATA_PROBE_OK;
	}
	return ENXIO;
}
/*
 * Character-device ioctl handler for the qla(4) management node.
 *
 * Supports direct/indirect register access, flash read/write/erase
 * (refused while the interface is up or the FDT is invalid), off-chip
 * memory access, firmware minidump retrieval, and PCI ID queries.
 * Returns 0 on success or an errno; unknown commands return 0 untouched.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;

	/* All request layouts share the same 'data' buffer. */
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev= ha->pci_dev;

	switch(cmd) {
	case QLA_RDWR_REG:
		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg,
			    &u.rv->val, u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		/* Flash descriptor table must be valid to read flash. */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		/* Refuse flash writes while the interface is active. */
		if (ifp->if_drv_flags & (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:
		ifp = ha->ifp;

		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		/* Refuse erasure while the interface is active. */
		if (ifp->if_drv_flags & (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off, u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
		    u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}
		break;

	case QLA_RD_FW_DUMP_SIZE:
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->template_size = ha->hw.dma_buf.minidump.size;
		fw_dump->pci_func = ha->pci_func;
		break;

	case QLA_RD_FW_DUMP:
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		/* Caller must supply a buffer of exactly the dump size. */
		if ((fw_dump->md_template == NULL) ||
		    (fw_dump->template_size != ha->hw.dma_buf.minidump.size)) {
			rval = EINVAL;
			break;
		}

		if ((rval = copyout(ha->hw.dma_buf.minidump.dma_b,
		    fw_dump->md_template, fw_dump->template_size)))
			rval = ENXIO;
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return rval;
}
/*
 * PCI probe for NeoMagic NM256 audio.
 *
 * NM256ZX is accepted unconditionally.  NM256AV parts are first checked
 * against a subsystem-ID blacklist of known non-AC97 cards; if not
 * listed, the device's registers are temporarily mapped and the chip is
 * reset so the AC97 mixer-presence register can be probed directly.
 * Returns 0 (with a description set) on acceptance, ENXIO otherwise.
 */
static int
nm_pci_probe(device_t dev)
{
	struct sc_info *sc = NULL;
	char *s = NULL;
	u_int32_t subdev, i;

	/* Combined subsystem ID: (subdevice << 16) | subvendor. */
	subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev);
	switch (pci_get_devid(dev)) {
	case NM256AV_PCI_ID:
		i = 0;
		while ((i < NUM_BADCARDS) && (badcards[i] != subdev))
			i++;

		/* Try to catch other non-ac97 cards */

		if (i == NUM_BADCARDS) {
			if (!(sc = malloc(sizeof(*sc), M_DEVBUF,
			    M_NOWAIT | M_ZERO))) {
				device_printf(dev, "cannot allocate softc\n");
				return ENXIO;
			}

			sc->regid = PCIR_BAR(1);
			sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
			    &sc->regid, RF_ACTIVE);

			if (!sc->reg) {
				device_printf(dev,
				    "unable to map register space\n");
				free(sc, M_DEVBUF);
				return ENXIO;
			}

			/*
			 * My Panasonic CF-M2EV needs resetting device
			 * before checking mixer is present or not.
			 * [email protected].
			 */
			nm_wr(sc, 0, 0x11, 1); /* reset device */
			if ((nm_rd(sc, NM_MIXER_PRESENCE, 2) &
			    NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) {
				i = 0;	/* non-ac97 card, but not listed */
				DEB(device_printf(dev,
				    "subdev = 0x%x - badcard?\n", subdev));
			}
			bus_release_resource(dev, SYS_RES_MEMORY, sc->regid,
			    sc->reg);
			free(sc, M_DEVBUF);
		}

		if (i == NUM_BADCARDS)
			s = "NeoMagic 256AV";
		DEB(else)
		DEB(device_printf(dev,
		    "this is a non-ac97 NM256AV, not attaching\n"));
		break;

	case NM256ZX_PCI_ID:
		s = "NeoMagic 256ZX";
		break;
	}

	if (s)
		device_set_desc(dev, s);
	return s? 0 : ENXIO;
}
/*
 * Character-device ioctl handler for the qla management node.
 *
 * Provides direct/indirect register read-write, flash read/write/erase,
 * and PCI ID queries for user-space management tools.  Returns 0 on
 * success or an errno; unknown commands return 0 untouched.
 */
int
qla_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	qla_reg_val_t *rv;
	qla_rd_flash_t *rdf;
	qla_wr_flash_t *wrf;
	qla_rd_pci_ids_t *pci_ids;
	device_t pci_dev;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev= ha->pci_dev;

	switch(cmd) {
	case QLA_RDWR_REG:
		rv = (qla_reg_val_t *)data;

		/* Direct requests hit the register window immediately. */
		if (rv->direct) {
			if (rv->rd) {
				rv->val = READ_OFFSET32(ha, rv->reg);
			} else {
				WRITE_OFFSET32(ha, rv->reg, rv->val);
			}
		} else {
			if ((rval = qla_rdwr_indreg32(ha, rv->reg, &rv->val,
			    rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		rdf = (qla_rd_flash_t *)data;
		if ((rval = qla_rd_flash32(ha, rdf->off, &rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		wrf = (qla_wr_flash_t *)data;
		if ((rval = qla_wr_flash_buffer(ha, wrf->off, wrf->size,
		    wrf->buffer, wrf->pattern)))
			rval = ENXIO;
		break;

	case QLA_ERASE_FLASH:
		if (qla_erase_flash(ha, ((qla_erase_flash_t *)data)->off,
		    ((qla_erase_flash_t *)data)->size))
			rval = ENXIO;
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return rval;
}
/*
 * Attach a Yamaha DS-1 (YMF7xx) PCI audio device.
 *
 * Allocates and initializes the softc, maps the register BAR, creates
 * the sample-buffer DMA tag, initializes the chip, attaches the AC97
 * codec/mixer (with an EAPD-inversion quirk for one NEC model), hooks
 * the interrupt, and registers the PCM channels.  On any failure all
 * acquired resources are torn down and ENXIO is returned.
 */
static int
ds_pci_attach(device_t dev)
{
	u_int32_t subdev, i;
	struct sc_info *sc;
	struct ac97_info *codec = NULL;
	char status[SND_STATUSLEN];

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_ds1 softc");
	sc->dev = dev;
	/* Combined subsystem ID: (subdevice << 16) | subvendor. */
	subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev);
	sc->type = ds_finddev(pci_get_devid(dev), subdev);
	sc->rev = pci_get_revid(dev);

	pci_enable_busmaster(dev);

	sc->regid = PCIR_BAR(0);
	sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regid,
	    RF_ACTIVE);
	if (!sc->reg) {
		device_printf(dev, "unable to map register space\n");
		goto bad;
	}

	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, DS1_BUFFSIZE, 65536);

	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2,
	    /*boundary*/0,
	    /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
	    /*highaddr*/BUS_SPACE_MAXADDR,
	    /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
	    /*flags*/0, /*lockfunc*/NULL,
	    /*lockarg*/NULL, &sc->buffer_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	sc->regbase = NULL;
	if (ds_init(sc) == -1) {
		device_printf(dev, "unable to initialize the card\n");
		goto bad;
	}

	codec = AC97_CREATE(dev, sc, ds_ac97);
	if (codec == NULL)
		goto bad;
	/*
	 * Turn on inverted external amplifier sense flags for few
	 * 'special' boards.
	 */
	switch (subdev) {
	case 0x81171033:	/* NEC ValueStar (VT550/0) */
		ac97_setflags(codec, ac97_getflags(codec) | AC97_F_EAPD_INV);
		break;
	default:
		break;
	}
	mixer_init(dev, ac97_getmixerclass(), codec);

	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq || snd_setup_intr(dev, sc->irq, INTR_MPSAFE, ds_intr,
	    sc, &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	snprintf(status, SND_STATUSLEN, "at memory 0x%lx irq %ld %s",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_ds1));

	if (pcm_register(dev, sc, DS1_CHANS, 2))
		goto bad;
	for (i = 0; i < DS1_CHANS; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &ds1pchan_class, sc);
	for (i = 0; i < 2; i++)
		pcm_addchan(dev, PCMDIR_REC, &ds1rchan_class, sc);
	pcm_setstatus(dev, status);

	return 0;

bad:
	/* Unwind everything acquired so far, in reverse order. */
	if (codec)
		ac97_destroy(codec);
	if (sc->reg)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg);
	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
	if (sc->buffer_dmat)
		bus_dma_tag_destroy(sc->buffer_dmat);
	if (sc->control_dmat)
		bus_dma_tag_destroy(sc->control_dmat);
	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return ENXIO;
}
/*
 * One-time hardware initialization for the ESS Maestro (agg) audio chip.
 *
 * Programs the PCI config quirk bits, resets and re-enables the direct
 * sound engine, sets up the wave processor/WaveCache, wires the codec
 * ringbus routing, pokes the ASSP, and applies NEC-specific GPIO fixups
 * keyed off the subsystem IDs.
 */
static void
agg_init(struct agg_info* ess)
{
	u_int32_t data;

	/* Setup PCI config registers. */

	/* Disable all legacy emulations. */
	data = pci_read_config(ess->dev, CONF_LEGACY, 2);
	data |= LEGACY_DISABLED;
	pci_write_config(ess->dev, CONF_LEGACY, data, 2);

	/* Disconnect from CHI. (Makes Dell inspiron 7500 work?)
	 * Enable posted write.
	 * Prefer PCI timing rather than that of ISA.
	 * Don't swap L/R. */
	data = pci_read_config(ess->dev, CONF_MAESTRO, 4);
	data |= MAESTRO_CHIBUS | MAESTRO_POSTEDWRITE | MAESTRO_DMA_PCITIMING;
	data &= ~MAESTRO_SWAP_LR;
	pci_write_config(ess->dev, CONF_MAESTRO, data, 4);

	/* Reset direct sound. */
	bus_space_write_2(ess->st, ess->sh, PORT_HOSTINT_CTRL,
	    HOSTINT_CTRL_DSOUND_RESET);
	DELAY(10000);	/* XXX - too long? */
	bus_space_write_2(ess->st, ess->sh, PORT_HOSTINT_CTRL, 0);
	DELAY(10000);

	/* Enable direct sound interruption and hardware volume control. */
	bus_space_write_2(ess->st, ess->sh, PORT_HOSTINT_CTRL,
	    HOSTINT_CTRL_DSOUND_INT_ENABLED | HOSTINT_CTRL_HWVOL_ENABLED);

	/* Setup Wave Processor. */

	/* Enable WaveCache, set DMA base address. */
	wp_wrreg(ess, WPREG_WAVE_ROMRAM,
	    WP_WAVE_VIRTUAL_ENABLED | WP_WAVE_DRAM_ENABLED);
	bus_space_write_2(ess->st, ess->sh, PORT_WAVCACHE_CTRL,
	    WAVCACHE_ENABLED | WAVCACHE_WTSIZE_4MB);

	for (data = WAVCACHE_PCMBAR; data < WAVCACHE_PCMBAR + 4; data++)
		wc_wrreg(ess, data, ess->baseaddr >> WAVCACHE_BASEADDR_SHIFT);

	/* Setup Codec/Ringbus. */
	agg_initcodec(ess);
	bus_space_write_4(ess->st, ess->sh, PORT_RINGBUS_CTRL,
	    RINGBUS_CTRL_RINGBUS_ENABLED | RINGBUS_CTRL_ACLINK_ENABLED);

	wp_wrreg(ess, WPREG_BASE, 0x8500);	/* Parallel I/O */
	ringbus_setdest(ess, RINGBUS_SRC_ADC,
	    RINGBUS_DEST_STEREO | RINGBUS_DEST_DSOUND_IN);
	ringbus_setdest(ess, RINGBUS_SRC_DSOUND,
	    RINGBUS_DEST_STEREO | RINGBUS_DEST_DAC);

	/* Setup ASSP. Needed for Dell Inspiron 7500? */
	bus_space_write_1(ess->st, ess->sh, PORT_ASSP_CTRL_B, 0x00);
	bus_space_write_1(ess->st, ess->sh, PORT_ASSP_CTRL_A, 0x03);
	bus_space_write_1(ess->st, ess->sh, PORT_ASSP_CTRL_C, 0x00);

	/*
	 * Setup GPIO.
	 * There seems to be speciality with NEC systems.
	 */
	switch (pci_get_subvendor(ess->dev)
	    | (pci_get_subdevice(ess->dev) << 16)) {
	case NEC_SUBID1:
	case NEC_SUBID2:
		/* Matthew Braithwaite <*****@*****.**> reported that
		 * NEC Versa LX doesn't need GPIO operation. */
		bus_space_write_2(ess->st, ess->sh,
		    PORT_GPIO_MASK, 0x9ff);
		bus_space_write_2(ess->st, ess->sh, PORT_GPIO_DIR,
		    bus_space_read_2(ess->st, ess->sh, PORT_GPIO_DIR) | 0x600);
		bus_space_write_2(ess->st, ess->sh,
		    PORT_GPIO_DATA, 0x200);
		break;
	}
}
/*
 * Character-device ioctl entry point for the QLogic driver.
 * Dispatches management requests: register access, flash
 * read/write/erase, off-chip (MS) memory access, minidump (firmware
 * dump) retrieval, driver-state and slowpath-log snapshots, and PCI
 * id queries.  Returns 0 on success or an errno value.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
    struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;

	/* Overlay views of the ioctl payload, one per command family. */
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev= ha->pci_dev;

	switch(cmd) {
	case QLA_RDWR_REG:
		/* Read or write a device register, direct or indirect. */
		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg,
			    &u.rv->val, u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		/* Flash access requires a valid flash descriptor table. */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		/* Refuse writes while the interface is up and running. */
		ifp = ha->ifp;
		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:
		/* Same preconditions as flash write. */
		ifp = ha->ifp;
		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off, u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		/* Off-chip memory access via a local bounce value. */
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
		    u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}
		break;

	case QLA_RD_FW_DUMP_SIZE:
		/* Report total minidump size (template + buffer). */
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
		    ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;
		break;

	case QLA_RD_FW_DUMP:
		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev,
			    "%s: minidump not initialized\n", __func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		/* Caller must supply a buffer of exactly the size
		 * reported by QLA_RD_FW_DUMP_SIZE. */
		if ((fw_dump->minidump == NULL) ||
		    (fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
		    ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: minidump buffer [%p] size = [%d, %d] invalid\n",
			    __func__, fw_dump->minidump,
			    fw_dump->minidump_size,
			    (ha->hw.mdump_buffer_size +
			    ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		/* Minidump is only allowed on even-numbered (Port0)
		 * PCI functions. */
		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev,
			    "%s: mindump allowed only on Port0\n", __func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {
			/* Port offline: take the dump synchronously. */
			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}
		} else {
			/* Port online: initiate recovery (which produces
			 * the minidump) and poll for completion. */
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
			if (QLA_LOCK(ha, __func__,
			    QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev,
					    "%s: recovery initiated "
					    " to trigger minidump\n",
					    __func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed0\n", __func__);
				rval = ENXIO;
				break;
			}

			/* Poll up to 30 seconds, in 100ms steps. */
#define QLNX_DUMP_WAIT_SECS 30
			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
				    "%s: port not offline minidump failed\n",
				    __func__);
				rval = ENXIO;
				break;
			}

			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			/* Consume the dump so a new one can be taken. */
			if (QLA_LOCK(ha, __func__,
			    QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev,
				    "%s: QLA_LOCK() failed1\n", __func__);
				rval = ENXIO;
				break;
			}
		}

		/* Copy out the template followed by the dump buffer. */
		if ((rval = copyout(ha->hw.mdump_template,
		    fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
			    "%s: template copyout failed\n", __func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
		    ((uint8_t *)fw_dump->minidump +
		    ha->hw.mdump_template_size),
		    ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev,
			    "%s: minidump copyout failed\n", __func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		/* Return vendor/device/subsystem/revision ids. */
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return rval;
}
/*
 * Deferred (post-interrupt) part of device attach, run as a
 * config_intrhook: wait for codec-ready interrupts, record which AC97
 * codecs answered, then create the mixer and PCM channels and
 * register the sound device.  On any failure, releases the softc's
 * resources via atiixp_release_resource().
 */
static void
atiixp_chip_post_init(void *arg)
{
	struct atiixp_info *sc = (struct atiixp_info *)arg;
	uint32_t subdev;
	int i, timeout, found;
	char status[SND_STATUSLEN];

	atiixp_lock(sc);

	/* We're running now; the boot-time hook is no longer needed. */
	if (sc->delayed_attach.ich_func) {
		config_intrhook_disestablish(&sc->delayed_attach);
		sc->delayed_attach.ich_func = NULL;
	}

	/* wait for the interrupts to happen */
	timeout = 100;
	while (--timeout) {
		snd_mtxsleep(sc, sc->lock, 0, "ixpslp", 1);
		if (sc->codec_not_ready_bits)
			break;
	}

	atiixp_disable_interrupts(sc);

	if (timeout == 0) {
		device_printf(sc->dev,
		    "WARNING: timeout during codec detection; "
		    "codecs might be present but haven't interrupted\n");
		atiixp_unlock(sc);
		goto postinitbad;
	}

	found = 0;

	/*
	 * ATI IXP can have up to 3 codecs, but a single codec should
	 * suffice for now; only codec 0 is actually used (found is set
	 * for codec 0 only — codecs 1/2 are merely counted).
	 */
	if (!(sc->codec_not_ready_bits & ATI_REG_ISR_CODEC0_NOT_READY)) {
		/* codec 0 present */
		sc->codec_found++;
		sc->codec_idx = 0;
		found++;
	}

	if (!(sc->codec_not_ready_bits & ATI_REG_ISR_CODEC1_NOT_READY)) {
		/* codec 1 present */
		sc->codec_found++;
	}

	if (!(sc->codec_not_ready_bits & ATI_REG_ISR_CODEC2_NOT_READY)) {
		/* codec 2 present */
		sc->codec_found++;
	}

	atiixp_unlock(sc);

	if (found == 0)
		goto postinitbad;

	/* create/init mixer */
	sc->codec = AC97_CREATE(sc->dev, sc, atiixp_ac97);
	if (sc->codec == NULL)
		goto postinitbad;

	/* Board-specific quirks, keyed on (subdevice << 16) | subvendor. */
	subdev = (pci_get_subdevice(sc->dev) << 16) |
	    pci_get_subvendor(sc->dev);
	switch (subdev) {
	case 0x11831043:	/* ASUS A6R */
	case 0x2043161f:	/* Maxselect x710s - http://maxselect.ru/ */
		/* These boards need the EAPD signal inverted. */
		ac97_setflags(sc->codec,
		    ac97_getflags(sc->codec) | AC97_F_EAPD_INV);
		break;
	default:
		break;
	}

	mixer_init(sc->dev, ac97_getmixerclass(), sc->codec);

	if (pcm_register(sc->dev, sc, ATI_IXP_NPCHAN, ATI_IXP_NRCHAN))
		goto postinitbad;

	for (i = 0; i < ATI_IXP_NPCHAN; i++)
		pcm_addchan(sc->dev, PCMDIR_PLAY, &atiixp_chan_class, sc);
	for (i = 0; i < ATI_IXP_NRCHAN; i++)
		pcm_addchan(sc->dev, PCMDIR_REC, &atiixp_chan_class, sc);

	ksnprintf(status, SND_STATUSLEN, "at memory 0x%lx irq %ld %s",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_atiixp));

	pcm_setstatus(sc->dev, status);

	atiixp_lock(sc);
	atiixp_enable_interrupts(sc);
	atiixp_unlock(sc);

	return;

postinitbad:
	atiixp_release_resource(sc);
}
/*
 * Attach an ATA disk device: allocate the per-disk softc, fetch the
 * drive geometry, configure transfer limits from the drive's
 * capabilities, and publish a disk(9) device plus a "subdisk" child.
 *
 * Returns 0 on success, EEXIST if already attached, ENOMEM or ENXIO
 * on failure.
 */
static int
ad_attach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
	struct ata_device *atadev = device_get_softc(dev);
	struct ad_softc *adp;
	device_t parent;

	/* check that we have a virgin disk to attach */
	if (device_get_ivars(dev))
		return EEXIST;

	if (!(adp = malloc(sizeof(struct ad_softc), M_AD,
	    M_NOWAIT | M_ZERO))) {
		device_printf(dev, "out of memory\n");
		return ENOMEM;
	}
	device_set_ivars(dev, adp);

	/* get device geometry into internal structs */
	if (ad_get_geometry(dev)) {
		/*
		 * Fix: the original returned without freeing adp or
		 * clearing ivars, leaking the softc and leaving a
		 * stale pointer (attach failure does not call detach).
		 */
		device_set_ivars(dev, NULL);
		free(adp, M_AD);
		return ENXIO;
	}

	/* set the max size if configured */
	if (ata_setmax)
		ad_set_geometry(dev);

	/* init device parameters */
	ad_init(dev);

	/* announce we are here */
	ad_describe(dev);

	/* create the disk device */
	adp->disk = disk_alloc();
	adp->disk->d_strategy = ad_strategy;
	adp->disk->d_ioctl = ad_ioctl;
	adp->disk->d_dump = ad_dump;
	adp->disk->d_name = "ad";
	adp->disk->d_drv1 = dev;
	/* Cap transfers to the channel's DMA limit (or DFLTPHYS),
	 * and further to the 48-bit or 28-bit ATA command limits. */
	adp->disk->d_maxsize = ch->dma.max_iosize ?
	    ch->dma.max_iosize : DFLTPHYS;
	if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48)
		adp->disk->d_maxsize = min(adp->disk->d_maxsize,
		    65536 * DEV_BSIZE);
	else	/* 28bit ATA command limit */
		adp->disk->d_maxsize = min(adp->disk->d_maxsize,
		    256 * DEV_BSIZE);
	adp->disk->d_sectorsize = DEV_BSIZE;
	adp->disk->d_mediasize = DEV_BSIZE * (off_t)adp->total_secs;
	adp->disk->d_fwsectors = adp->sectors;
	adp->disk->d_fwheads = adp->heads;
	adp->disk->d_unit = device_get_unit(dev);
	if (atadev->param.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		adp->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((atadev->param.support.command2 & ATA_SUPPORT_CFA) ||
	    atadev->param.config == ATA_PROTO_CFA)
		adp->disk->d_flags |= DISKFLAG_CANDELETE;
	strlcpy(adp->disk->d_ident, atadev->param.serial,
	    sizeof(adp->disk->d_ident));
	strlcpy(adp->disk->d_descr, atadev->param.model,
	    sizeof(adp->disk->d_descr));
	/* Record the host adapter's PCI ids when the channel's parent
	 * is a PCI (or atapci) device. */
	parent = device_get_parent(ch->dev);
	if (parent != NULL && device_get_parent(parent) != NULL &&
	    (device_get_devclass(parent) == devclass_find("atapci") ||
	     device_get_devclass(device_get_parent(parent)) ==
	     devclass_find("pci"))) {
		adp->disk->d_hba_vendor = pci_get_vendor(parent);
		adp->disk->d_hba_device = pci_get_device(parent);
		adp->disk->d_hba_subvendor = pci_get_subvendor(parent);
		adp->disk->d_hba_subdevice = pci_get_subdevice(parent);
	}
	ata_disk_firmware_geom_adjust(adp->disk);
	disk_create(adp->disk, DISK_VERSION);
	device_add_child(dev, "subdisk", device_get_unit(dev));
	bus_generic_attach(dev);

	callout_init(&atadev->spindown_timer, 1);
	return 0;
}
/*
 * Attach the tws (LSI/3ware 9750) controller: record PCI ids, set up
 * locks and sysctl nodes, map BAR1 register space (and BAR2 MFA space
 * in push mode), hook up interrupts, create the /dev control node,
 * and initialize the controller and CAM.  Errors unwind through the
 * attach_fail_* labels in reverse order of acquisition.
 */
static int
tws_attach(device_t dev)
{
	struct tws_softc *sc = device_get_softc(dev);
	u_int32_t bar;
	int error=0,i;

	/* no tracing yet */
	/* Look up our softc and initialize its fields. */
	sc->tws_dev = dev;
	sc->device_id = pci_get_device(dev);
	sc->subvendor_id = pci_get_subvendor(dev);
	sc->subdevice_id = pci_get_subdevice(dev);

	/* Initialize mutexes */
	mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
	mtx_init( &sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF);
	mtx_init( &sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF);
	mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);

	/* Trace-queue failure is non-fatal; just report it. */
	if ( tws_init_trace_q(sc) == FAILURE )
		printf("trace init failure\n");
	/* send init event */
	mtx_lock(&sc->gen_lock);
	tws_send_event(sc, TWS_INIT_START);
	mtx_unlock(&sc->gen_lock);

#if _BYTE_ORDER == _BIG_ENDIAN
	TWS_TRACE(sc, "BIG endian", 0, 0);
#endif

	/* sysctl context setup */
	sysctl_ctx_init(&sc->tws_clist);
	sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if ( sc->tws_oidp == NULL ) {
		tws_log(sc, SYSCTL_TREE_NODE_ADD);
		goto attach_fail_1;
	}
	SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
	    OID_AUTO, "driver_version", CTLFLAG_RD,
	    TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

	pci_enable_busmaster(dev);

	/* BAR reads below are trace-only diagnostics. */
	bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
	TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
	bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
	bar = bar & ~TWS_BIT2;
	TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

	/* MFA base address is the BAR2 register, used for push mode.
	 * Firmware will eventually move to pull mode, during which
	 * this needs to change.
	 */
#ifndef TWS_PULL_MODE_ENABLE
	sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
	sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
	TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

	/* allocate MMIO register space */
	sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) == NULL) {
		tws_log(sc, ALLOC_MEMORY_RES);
		goto attach_fail_1;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
	/* Allocate bus space for inbound mfa */
	sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
	if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE)) == NULL) {
		tws_log(sc, ALLOC_MEMORY_RES);
		goto attach_fail_2;
	}
	sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
	sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

	/* Allocate and register our interrupt. */
	sc->intr_type = TWS_INTx; /* default */

	if ( tws_enable_msi )
		sc->intr_type = TWS_MSI;
	if ( tws_setup_irq(sc) == FAILURE ) {
		tws_log(sc, ALLOC_MEMORY_RES);
		goto attach_fail_3;
	}

	/*
	 * Create a /dev entry for this device. The kernel will assign us
	 * a major number automatically. We use the unit number of this
	 * device as the minor number and name the character device
	 * "tws<unit>".
	 */
	sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
	    device_get_unit(dev));
	sc->tws_cdev->si_drv1 = sc;

	if ( tws_init(sc) == FAILURE ) {
		tws_log(sc, TWS_INIT_FAILURE);
		goto attach_fail_4;
	}
	if ( tws_init_ctlr(sc) == FAILURE ) {
		tws_log(sc, TWS_CTLR_INIT_FAILURE);
		goto attach_fail_4;
	}
	if ((error = tws_cam_attach(sc))) {
		tws_log(sc, TWS_CAM_ATTACH);
		goto attach_fail_4;
	}
	/* send init complete event */
	mtx_lock(&sc->gen_lock);
	tws_send_event(sc, TWS_INIT_COMPLETE);
	mtx_unlock(&sc->gen_lock);

	TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
	return(0);

	/* Error unwind: release in reverse order of acquisition. */
attach_fail_4:
	tws_teardown_intr(sc);
	destroy_dev(sc->tws_cdev);
attach_fail_3:
	for(i=0;i<sc->irqs;i++) {
		if ( sc->irq_res[i] ){
			if (bus_release_resource(sc->tws_dev,
			    SYS_RES_IRQ, sc->irq_res_id[i], sc->irq_res[i]))
				TWS_TRACE(sc, "bus irq res", 0, 0);
		}
	}
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
	if ( sc->mfa_res ){
		if (bus_release_resource(sc->tws_dev,
		    SYS_RES_MEMORY, sc->mfa_res_id, sc->mfa_res))
			TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
	}
	if ( sc->reg_res ){
		if (bus_release_resource(sc->tws_dev,
		    SYS_RES_MEMORY, sc->reg_res_id, sc->reg_res))
			TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
	}
attach_fail_1:
	mtx_destroy(&sc->q_lock);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->gen_lock);
	mtx_destroy(&sc->io_lock);
	sysctl_ctx_free(&sc->tws_clist);
	return (ENXIO);
}
/*
 * PCI attach for the Broadcom BCM43xx (bwi) wireless driver: enable
 * bus mastering, map the register BAR, hook up the (shareable)
 * interrupt, stash PCI ids in the softc, and hand off to the
 * chip-independent bwi_attach().  Returns 0 on success or ENXIO /
 * bwi_attach()'s error, with all acquired resources released.
 */
static int
bwi_pci_attach(device_t dev)
{
	struct bwi_pci_softc *psc = device_get_softc(dev);
	struct bwi_softc *sc = &psc->sc_sc;
	int error = ENXIO;

	sc->sc_dev = dev;

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	sc->sc_mem_rid = BWI_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the card is setup are discarded.
	 */
	sc->sc_invalid = 1;

	/*
	 * Arrange interrupt line.
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_SHAREABLE|RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, bwi_intr, sc, &sc->sc_irq_handle)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	/* Get more PCI information */
	sc->sc_pci_did = pci_get_device(dev);
	sc->sc_pci_revid = pci_get_revid(dev);
	sc->sc_pci_subvid = pci_get_subvendor(dev);
	sc->sc_pci_subdid = pci_get_subdevice(dev);

	error = bwi_attach(sc);
	if (error == 0)		/* success */
		return 0;

	bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
bad2:
	/* Fix: release with the rid used for allocation, not 0. */
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
	    sc->sc_irq_res);
bad1:
	/*
	 * Fix: the original released the memory BAR with BS_BAR (a rid
	 * from another driver); bus_release_resource(9) requires the
	 * same rid the resource was allocated with (sc_mem_rid, i.e.
	 * BWI_PCIR_BAR).
	 */
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
	    sc->sc_mem_res);
bad:
	return (error);
}