/*
 * Populate any missing fields in @p info from the parent PCI device's
 * subsystem vendor/device and device IDs.
 *
 * Returns 0 on success.
 */
static int
bhndb_pci_populate_board_info(device_t dev, device_t child,
    struct bhnd_board_info *info)
{
	struct bhndb_pci_softc *sc = device_get_softc(dev);

	/*
	 * On a subset of Apple BCM4360 modules, always prefer the
	 * PCI subdevice to the SPROM-supplied boardtype.
	 *
	 * TODO:
	 *
	 * Broadcom's own drivers implement this override, and then later use
	 * the remapped BCM4360 board type to determine the required
	 * board-specific workarounds.
	 *
	 * Without access to this hardware, it's unclear why this mapping
	 * is done, and we must do the same. If we can survey the hardware
	 * in question, it may be possible to replace this behavior with
	 * explicit references to the SPROM-supplied boardtype(s) in our
	 * quirk definitions.
	 */
	if (pci_get_subvendor(sc->parent) == PCI_VENDOR_APPLE &&
	    (info->board_type == BHND_BOARD_BCM94360X29C ||
	     info->board_type == BHND_BOARD_BCM94360X29CP2 ||
	     info->board_type == BHND_BOARD_BCM94360X51 ||
	     info->board_type == BHND_BOARD_BCM94360X51P2)) {
		/* Clear so the PCI subdevice override below takes effect */
		info->board_type = 0;
	}

	/* If NVRAM did not supply vendor/type/devid info, provide the PCI
	 * subvendor/subdevice/device values. */
	if (info->board_vendor == 0)
		info->board_vendor = pci_get_subvendor(sc->parent);

	if (info->board_type == 0)
		info->board_type = pci_get_subdevice(sc->parent);

	if (info->board_devid == 0)
		info->board_devid = pci_get_device(sc->parent);

	return (0);
}
int neo_match(struct device *parent, void *match, void *aux) { struct pci_attach_args *pa = (struct pci_attach_args *) aux; #if 0 u_int32_t subdev, badcard; #endif if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_NEOMAGIC) return (0); #if 0 subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev); #endif switch (PCI_PRODUCT(pa->pa_id)) { case PCI_PRODUCT_NEOMAGIC_NM256AV: #if 0 i = 0; while ((i < NUM_BADCARDS) && (badcards[i] != subdev)) i++; if (i == NUM_BADCARDS) s = "NeoMagic 256AV"; DEB(else) DEB(device_printf(dev, "this is a non-ac97 NM256AV, not attaching\n")); return (1); #endif case PCI_PRODUCT_NEOMAGIC_NM256ZX: return (1); } return (0); }
/*
 * Look up the configuration entry for a PCI puc device.
 *
 * Walks the table @p desc (terminated by vendor == 0xffff) and returns
 * the first entry whose vendor/device match the device, accepting either
 * an exact subsystem match or a wildcard (subvendor == 0xffff) entry.
 * Returns NULL when no entry matches.
 */
static const struct puc_cfg *
puc_pci_match(device_t dev, const struct puc_cfg *desc)
{
	uint16_t device, subdevice, subvendor, vendor;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	for (; desc->vendor != 0xffff; desc++) {
		if (desc->vendor != vendor || desc->device != device)
			continue;
		/* Exact subsystem match, or a wildcard entry. */
		if (desc->subvendor == 0xffff ||
		    (desc->subvendor == subvendor &&
		     desc->subdevice == subdevice))
			return (desc);
	}

	/* no match */
	return (NULL);
}
/*
 * Device attach routine for the HighPoint RAID controller.
 *
 * Allocates and creates the HIM (hardware interface module) adapter
 * object for this PCI function, registers it with the LDM layer
 * (creating a new virtual bus if this adapter does not join an existing
 * one), and links the HBA onto its vbus' hba_list.
 *
 * Returns 0 on success, ENXIO or -1 on failure.
 * NOTE(review): the error conventions are mixed (ENXIO vs. -1) — both are
 * non-zero so attach fails either way, but confirm before unifying.
 */
static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him = hba->ldm_adapter.him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev), pci_get_slot(dev), pci_get_function(dev)));

	pci_enable_busmaster(dev);

	/* Identify the adapter to the HIM by its full PCI ID. */
	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	/* Subsystem ID packed as (subdevice << 16) | subvendor. */
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 | pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	/* NOTE(review): kmalloc with M_WAITOK should not return NULL;
	 * the check is kept as a defensive measure. */
	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
	if (!hba->ldm_adapter.him_handle)
		return ENXIO;

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr, hba->ldm_adapter.him_handle, hba)) {
		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
		return -1;
	}

	os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus, hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

	/*
	 * Try to register with an existing virtual bus; if that fails,
	 * create a fresh vbus and register against it instead.
	 * NOTE(review): the second ldm_register_adapter() return value is
	 * ignored — confirm registration cannot fail on a new vbus.
	 */
	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF, M_WAITOK);
		if (!vbus_ext) {
			kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
			return -1;
		}
		memset(vbus_ext, 0, sizeof(VBUS_EXT));
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		ldm_register_adapter(&hba->ldm_adapter);
	}

	/* Find the vbus we ended up on and link this HBA onto its list. */
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}
	return 0;
}
/*---------------------------------------------------------------------------*
 *	iwic PCI probe
 *
 *	Scan the win_ids table (terminated by a zero type) for this
 *	device's PCI ID; an entry with sv/sd of -1 matches any subsystem.
 *	Sets the device description and returns 0 on a match, ENXIO
 *	otherwise.
 *---------------------------------------------------------------------------*/
static int
iwic_pci_probe(device_t dev)
{
	u_int32_t devid = pci_get_devid(dev);
	u_int32_t subven = pci_get_subvendor(dev);
	u_int32_t subdev = pci_get_subdevice(dev);
	struct winids *wip;

	for (wip = win_ids; wip->type; ++wip) {
		if (wip->type != devid)
			continue;
		/* -1/-1 entries match regardless of subsystem ID */
		if ((wip->sv == -1 && wip->sd == -1) ||
		    (wip->sv == subven && wip->sd == subdev))
			break;
	}

	if (wip->desc == NULL)
		return (ENXIO);

	if (bootverbose) {
		printf("iwic_pci_probe: vendor = 0x%x, device = 0x%x\n",
		    pci_get_vendor(dev), pci_get_device(dev));
		printf("iwic_pci_probe: subvendor = 0x%x, subdevice = 0x%x\n",
		    subven, subdev);
	}
	device_set_desc(dev, wip->desc);
	return (0);
}
/*
 * Look up the identification entry for an AAC adapter.
 *
 * First attempts an exact four-tuple match against aac_identifiers,
 * then falls back to a vendor/device-only match against the family
 * table.  Returns NULL if the adapter is unknown.
 */
static struct aac_ident *
aac_find_ident(device_t dev)
{
	struct aac_ident *entry;
	u_int16_t vendor, device, subvendor, subdevice;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);
	subvendor = pci_get_subvendor(dev);
	subdevice = pci_get_subdevice(dev);

	/* Pass 1: exact subsystem match. */
	for (entry = aac_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor == vendor && entry->device == device &&
		    entry->subvendor == subvendor &&
		    entry->subdevice == subdevice)
			return (entry);
	}

	/* Pass 2: family (vendor/device only) match. */
	for (entry = aac_family_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor == vendor && entry->device == device)
			return (entry);
	}

	return (NULL);
}
/*
 * Probe for a NeoMagic NM256AV/NM256ZX audio device.
 *
 * NM256AV parts on known-bad (non-AC97) subsystem IDs are rejected.
 * Returns 0 on a match (after setting the device description),
 * ENXIO otherwise.
 */
static int
nm_pci_probe(device_t dev)
{
	char *desc = NULL;
	u_int32_t subdev, i;

	/* Subsystem ID packed as (subdevice << 16) | subvendor. */
	subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev);

	switch (pci_get_devid(dev)) {
	case NM256AV_PCI_ID:
		/* Screen out subsystem IDs without AC97 support. */
		for (i = 0; i < NUM_BADCARDS; i++) {
			if (badcards[i] == subdev)
				break;
		}
		if (i == NUM_BADCARDS)
			desc = "NeoMagic 256AV";
		DEB(else)
			DEB(device_printf(dev, "this is a non-ac97 NM256AV, not attaching\n"));
		break;
	case NM256ZX_PCI_ID:
		desc = "NeoMagic 256ZX";
		break;
	}

	if (desc != NULL)
		device_set_desc(dev, desc);
	return (desc != NULL) ? 0 : ENXIO;
}
/*
 * CAM action dispatch for the AHCI enclosure-management SIM.
 *
 * Handles ATA I/O, bus/device reset, and path inquiry CCBs; everything
 * else is rejected with CAM_REQ_INVALID.  XPT_ATA_IO hands off to
 * ahci_em_begin_transaction() (which completes the CCB asynchronously),
 * so those paths return without calling xpt_done() here.
 */
static void
ahciemaction(struct cam_sim *sim, union ccb *ccb)
{
	device_t dev, parent;
	struct ahci_enclosure *enc;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE,
	    ("ahciemaction func_code=%x\n", ccb->ccb_h.func_code));
	enc = cam_sim_softc(sim);
	dev = enc->dev;
	switch (ccb->ccb_h.func_code) {
	case XPT_ATA_IO:	/* Execute the requested I/O operation */
		if (ahci_check_ids(dev, ccb))
			return;
		/* Completion is signalled by the transaction code. */
		ahci_em_begin_transaction(dev, ccb);
		return;
	case XPT_RESET_BUS:		/* Reset the specified bus */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified device */
		ahci_em_reset(dev);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		/* Identify the HBA by its parent PCI device's IDs. */
		parent = device_get_parent(dev);
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		cpi->max_target = 0;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		cpi->base_transfer_speed = 150000;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "AHCI", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		cpi->transport = XPORT_SATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->maxio = MAXPHYS;
		cpi->hba_vendor = pci_get_vendor(parent);
		cpi->hba_device = pci_get_device(parent);
		cpi->hba_subvendor = pci_get_subvendor(parent);
		cpi->hba_subdevice = pci_get_subdevice(parent);
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
/*
 * Probe for Promise SuperTrak I2O RAID controllers.
 *
 * Matches the Intel i960-based SuperTrak SX6000 and the older
 * SuperTrak 100, both carrying the Promise subvendor ID (0x105a).
 */
static int
iop_pci_probe(device_t dev)
{
	const char *desc = NULL;

	/* Only Promise-branded boards are supported. */
	if (pci_get_subvendor(dev) != 0x105a)
		return ENXIO;

	switch (pci_get_devid(dev)) {
	case 0x19628086:
		/* tested with actual hardware kindly donated by Promise */
		desc = "Promise SuperTrak SX6000 ATA RAID controller";
		break;
	case 0x19608086:
		/* support the older SuperTrak 100 as well */
		desc = "Promise SuperTrak 100 ATA RAID controller";
		break;
	}

	if (desc == NULL)
		return ENXIO;
	device_set_desc(dev, desc);
	return BUS_PROBE_DEFAULT;
}
/*
 * Find the mlx_identifiers entry for this adapter, or NULL if unknown.
 * An entry with subvendor == 0 matches any subsystem ID.
 */
static struct mlx_ident *
mlx_pci_match(device_t dev)
{
	struct mlx_ident *entry;

	for (entry = mlx_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor != pci_get_vendor(dev))
			continue;
		if (entry->device != pci_get_device(dev))
			continue;
		/* subvendor 0 is a wildcard entry */
		if (entry->subvendor != 0 &&
		    (entry->subvendor != pci_get_subvendor(dev) ||
		     entry->subdevice != pci_get_subdevice(dev)))
			continue;
		return (entry);
	}
	return (NULL);
}
/*
 * Probe for a supported Yamaha DS-1 audio device.
 *
 * Looks the device up by PCI device ID and packed subsystem ID; on a
 * match sets the description and returns BUS_PROBE_DEFAULT, otherwise
 * ENXIO.
 */
static int
ds_pci_probe(device_t dev)
{
	u_int32_t subsys;
	int idx;

	/* Subsystem ID packed as (subdevice << 16) | subvendor. */
	subsys = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev);
	idx = ds_finddev(pci_get_devid(dev), subsys);
	if (idx < 0)
		return ENXIO;

	device_set_desc(dev, ds_devs[idx].name);
	return BUS_PROBE_DEFAULT;
}
/*
 * Find the mfi_identifiers entry for this adapter, or NULL if unknown.
 * Sub-IDs of 0xffff in a table entry act as wildcards.
 */
static struct mfi_ident *
mfi_find_ident(device_t dev)
{
	struct mfi_ident *entry;

	for (entry = mfi_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor != pci_get_vendor(dev))
			continue;
		if (entry->device != pci_get_device(dev))
			continue;
		if (entry->subvendor != 0xffff &&
		    entry->subvendor != pci_get_subvendor(dev))
			continue;
		if (entry->subdevice != 0xffff &&
		    entry->subdevice != pci_get_subdevice(dev))
			continue;
		return (entry);
	}
	return (NULL);
}
/*
 * Match a Compaq SMART array controller.
 *
 * The PCI device ID only narrows the device to a controller family;
 * the specific board is identified by the packed PCI subsystem ID.
 * Returns the board table entry, or NULL if unrecognized.
 */
static struct ida_board *
ida_pci_match(device_t dev)
{
	u_int32_t devid, subsys;
	int i;

	devid = pci_get_devid(dev);
	/* Subsystem ID packed as (subdevice << 16) | subvendor. */
	subsys = pci_get_subdevice(dev) << 16 | pci_get_subvendor(dev);

	if (devid != IDA_DEVICEID_SMART &&
	    devid != IDA_DEVICEID_DEC_SMART &&
	    devid != IDA_DEVICEID_NCR_53C1510)
		return (NULL);

	for (i = 0; board_id[i].board; i++) {
		if (board_id[i].board == subsys)
			return (&board_id[i]);
	}
	return (NULL);
}
/*
 * Probe for a supported Mylex controller.
 *
 * Scans mly_identifiers (subvendor 0 entries match any subsystem ID);
 * on a match sets the description and returns a low priority so a more
 * specific driver may override.
 */
static int
mly_pci_probe(device_t dev)
{
	struct mly_ident *entry;

	debug_called(1);

	for (entry = mly_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor != pci_get_vendor(dev) ||
		    entry->device != pci_get_device(dev))
			continue;
		/* subvendor 0 is a wildcard entry */
		if (entry->subvendor != 0 &&
		    (entry->subvendor != pci_get_subvendor(dev) ||
		     entry->subdevice != pci_get_subdevice(dev)))
			continue;
		device_set_desc(dev, entry->desc);
		return(-10);	/* allow room to be overridden */
	}
	return(ENXIO);
}
/*
 * Probe for a supported Mylex DAC-family controller.
 *
 * Scans mlx_identifiers (subvendor 0 entries match any subsystem ID);
 * on a match sets the description and returns BUS_PROBE_DEFAULT.
 */
static int
mlx_pci_probe(device_t dev)
{
	struct mlx_ident *entry;

	debug_called(1);

	for (entry = mlx_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor != pci_get_vendor(dev) ||
		    entry->device != pci_get_device(dev))
			continue;
		/* subvendor 0 is a wildcard entry */
		if (entry->subvendor != 0 &&
		    (entry->subvendor != pci_get_subvendor(dev) ||
		     entry->subdevice != pci_get_subdevice(dev)))
			continue;
		device_set_desc(dev, entry->desc);
		return(BUS_PROBE_DEFAULT);
	}
	return(ENXIO);
}
/*
 * Find the adw_pci_ident_table entry matching this device.
 *
 * Composes the full 64-bit PCI identity and compares it against each
 * table entry under that entry's mask.  Returns NULL if no entry
 * matches.
 */
static struct adw_pci_identity *
adw_find_pci_device(device_t dev)
{
	struct adw_pci_identity *entry;
	u_int64_t id;
	u_int idx;

	id = adw_compose_id(pci_get_device(dev), pci_get_vendor(dev),
	    pci_get_subdevice(dev), pci_get_subvendor(dev));
	for (idx = 0; idx < adw_num_pci_devs; idx++) {
		entry = &adw_pci_ident_table[idx];
		if ((id & entry->id_mask) == entry->full_id)
			return (entry);
	}
	return (NULL);
}
/*
 * Attach routine for the dc(4) internal (SIA) pseudo-PHY.
 *
 * Registers the PHY with the MII layer, clears the 10baseT status and
 * control registers of the parent dc(4) controller, and advertises a
 * capability set chosen from the bridge's PCI subsystem ID.
 */
static int
dcphy_attach(device_t dev)
{
	struct mii_softc *sc;
	struct dc_softc *dc_sc;
	device_t brdev;

	sc = device_get_softc(dev);

	mii_phy_dev_attach(dev, MIIF_NOISOLATE | MIIF_NOMANPAUSE,
	    &dcphy_funcs, 0);

	/*PHY_RESET(sc);*/
	/* Reach into the parent dc(4) softc to clear the SIA registers. */
	dc_sc = sc->mii_pdata->mii_ifp->if_softc;
	CSR_WRITE_4(dc_sc, DC_10BTSTAT, 0);
	CSR_WRITE_4(dc_sc, DC_10BTCTRL, 0);

	/* Select capabilities by the bridge's packed subsystem ID
	 * ((subdevice << 16) | subvendor). */
	brdev = device_get_parent(sc->mii_dev);
	switch (pci_get_subdevice(brdev) << 16 | pci_get_subvendor(brdev)) {
	case COMPAQ_PRESARIO_ID:
		/* Example of how to only allow 10Mbps modes. */
		sc->mii_capabilities = BMSR_ANEG | BMSR_10TFDX | BMSR_10THDX;
		break;
	default:
		/* SIA-only ports get 10Mbps modes; otherwise allow 100. */
		if (dc_sc->dc_pmode == DC_PMODE_SIA)
			sc->mii_capabilities =
			    BMSR_ANEG | BMSR_10TFDX | BMSR_10THDX;
		else
			sc->mii_capabilities =
			    BMSR_ANEG | BMSR_100TXFDX | BMSR_100TXHDX |
			    BMSR_10TFDX | BMSR_10THDX;
		break;
	}

	sc->mii_capabilities &= sc->mii_capmask;
	device_printf(dev, " ");
	mii_phy_add_media(sc);
	printf("\n");

	MIIBUS_MEDIAINIT(sc->mii_dev);
	return (0);
}
/*
 * Locate the csa_card entry for this device's PCI subsystem ID.
 *
 * Falls back to the card's terminating (subvendor == 0) entry when no
 * specific subsystem matches, and to the global "nocard" entry when the
 * card type itself is unknown.
 */
struct csa_card *
csa_findsubcard(device_t dev)
{
	struct card_type *card;
	struct csa_card *sub;

	card = csa_findcard(dev);
	if (card == NULL)
		return &nocard;

	/* The terminator entry doubles as the generic fallback. */
	for (sub = card->cards; sub->subvendor != 0; sub++) {
		if (sub->subvendor == pci_get_subvendor(dev) &&
		    sub->subdevice == pci_get_subdevice(dev))
			break;
	}
	return sub;
}
/*
 * Find the mps_identifiers entry for this adapter, or NULL if unknown.
 * Sub-IDs of 0xffff in a table entry act as wildcards.
 */
static struct mps_ident *
mps_find_ident(device_t dev)
{
	struct mps_ident *entry;

	for (entry = mps_identifiers; entry->vendor != 0; entry++) {
		if (entry->vendor == pci_get_vendor(dev) &&
		    entry->device == pci_get_device(dev) &&
		    (entry->subvendor == 0xffff ||
		     entry->subvendor == pci_get_subvendor(dev)) &&
		    (entry->subdevice == 0xffff ||
		     entry->subdevice == pci_get_subdevice(dev)))
			return (entry);
	}
	return (NULL);
}
/*
 * Match a PCI UART against the given ID table.
 *
 * The table (terminated by vendor == 0xffff) is assumed to keep all
 * entries for a given vendor/device pair contiguous.  The first entry
 * of a run may carry subven == 0xffff, meaning "match any subsystem";
 * otherwise an exact subsystem match within the run is required.
 * Returns the matching entry or NULL.
 */
const static struct pci_id *
uart_pci_match(device_t dev, const struct pci_id *id)
{
	uint16_t device, subdev, subven, vendor;

	vendor = pci_get_vendor(dev);
	device = pci_get_device(dev);

	/* Locate the first entry for this vendor/device pair. */
	for (; id->vendor != 0xffff; id++) {
		if (id->vendor == vendor && id->device == device)
			break;
	}
	if (id->vendor == 0xffff)
		return (NULL);

	/* A wildcard subvendor on the first entry matches everything. */
	if (id->subven == 0xffff)
		return (id);

	/* Scan the rest of the run for an exact subsystem match. */
	subven = pci_get_subvendor(dev);
	subdev = pci_get_subdevice(dev);
	for (; id->vendor == vendor && id->device == device; id++) {
		if (id->subven == subven && id->subdev == subdev)
			return (id);
	}
	return (NULL);
}
/*
 * Probe for a supported Intel XL710-family (i40e) device.
 *
 * Scans ixl_vendor_info_array (terminated by vendor_id == 0); subsystem
 * entries of 0 act as wildcards.  On a match, sets the description to
 * "<name>, Version - <driver version>" and returns BUS_PROBE_DEFAULT.
 *
 * Fix: use snprintf() instead of sprintf() so a long description or
 * version string can never overflow device_name[].
 */
static int
ixl_probe(device_t dev)
{
	ixl_vendor_info_t *ent;

	u16	pci_vendor_id, pci_device_id;
	u16	pci_subvendor_id, pci_subdevice_id;
	char	device_name[256];

	pci_vendor_id = pci_get_vendor(dev);
	if (pci_vendor_id != I40E_INTEL_VENDOR_ID)
		return (ENXIO);

	pci_device_id = pci_get_device(dev);
	pci_subvendor_id = pci_get_subvendor(dev);
	pci_subdevice_id = pci_get_subdevice(dev);

	ent = ixl_vendor_info_array;
	while (ent->vendor_id != 0) {
		if ((pci_vendor_id == ent->vendor_id) &&
		    (pci_device_id == ent->device_id) &&
		    ((pci_subvendor_id == ent->subvendor_id) ||
		     (ent->subvendor_id == 0)) &&
		    ((pci_subdevice_id == ent->subdevice_id) ||
		     (ent->subdevice_id == 0))) {
			/* Bounded formatting; silently truncates rather
			 * than overflowing the buffer. */
			snprintf(device_name, sizeof(device_name),
			    "%s, Version - %s", ixl_strings[ent->index],
			    ixl_driver_version);
			device_set_desc_copy(dev, device_name);
			return (BUS_PROBE_DEFAULT);
		}
		ent++;
	}
	return (ENXIO);
}
/*
 * Attempt to find a match for the given device in
 * the given device table.
 *
 * Each table entry carries match_* flags that select which of the four
 * PCI ID fields participate in the comparison; unpopulated entries are
 * skipped.
 *
 * Returns the device structure or NULL if no matching
 * PCI device is found.
 */
static const struct pci_device_id *
ath_pci_probe_device(device_t dev, const struct pci_device_id *dev_table,
    int nentries)
{
	const struct pci_device_id *e;
	int i;
	int vendor_id, device_id;
	int sub_vendor_id, sub_device_id;

	vendor_id = pci_get_vendor(dev);
	device_id = pci_get_device(dev);
	sub_vendor_id = pci_get_subvendor(dev);
	sub_device_id = pci_get_subdevice(dev);

	for (i = 0; i < nentries; i++) {
		e = &dev_table[i];
		/* Don't match on non-populated (eg empty) entries */
		if (! e->match_populated)
			continue;
		/* Each match_* flag enables one field comparison. */
		if ((!e->match_vendor_id || e->vendor_id == vendor_id) &&
		    (!e->match_device_id || e->device_id == device_id) &&
		    (!e->match_sub_vendor_id ||
		     e->sub_vendor_id == sub_vendor_id) &&
		    (!e->match_sub_device_id ||
		     e->sub_device_id == sub_device_id))
			return (e);
	}

	return (NULL);
}
/*
 * Attach routine for the SIBA-over-PCI bridge used by bwn(4).
 *
 * Enables bus mastering, maps the register BAR, records the bridge's
 * PCI identification in the siba softc, and hands off to the core
 * attach code.  Returns 0 on success or ENXIO if the BAR cannot be
 * mapped.
 */
static int
siba_bwn_attach(device_t dev)
{
	struct siba_bwn_softc *ssc = device_get_softc(dev);
	struct siba_softc *siba = &ssc->ssc_siba;

	siba->siba_dev = dev;
	siba->siba_type = SIBA_TYPE_PCI;

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	siba->siba_mem_rid = SIBA_PCIR_BAR;
	siba->siba_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		&siba->siba_mem_rid, RF_ACTIVE);
	if (siba->siba_mem_res == NULL) {
		device_printf(dev, "cannot map register space\n");
		return (ENXIO);
	}
	siba->siba_mem_bt = rman_get_bustag(siba->siba_mem_res);
	siba->siba_mem_bh = rman_get_bushandle(siba->siba_mem_res);

	/* Get more PCI information */
	siba->siba_pci_did = pci_get_device(dev);
	siba->siba_pci_vid = pci_get_vendor(dev);
	siba->siba_pci_subvid = pci_get_subvendor(dev);
	siba->siba_pci_subdid = pci_get_subdevice(dev);
	siba->siba_pci_revid = pci_get_revid(dev);

	return (siba_core_attach(siba));
}
/*
 * CAM action dispatch for an ATA channel SIM.
 *
 * Handles I/O submission, transport-settings get/set, reset, and path
 * inquiry CCBs.  XPT_ATA_IO/XPT_SCSI_IO hand off to
 * ata_cam_begin_transaction() (completed asynchronously), so those
 * paths return without calling xpt_done() here; all other paths fall
 * through to xpt_done() at the bottom.
 */
static void
ataaction(struct cam_sim *sim, union ccb *ccb)
{
	device_t dev, parent;
	struct ata_channel *ch;

	CAM_DEBUG(ccb->ccb_h.path, CAM_DEBUG_TRACE, ("ataaction func_code=%x\n",
	    ccb->ccb_h.func_code));

	ch = (struct ata_channel *)cam_sim_softc(sim);
	dev = ch->dev;
	switch (ccb->ccb_h.func_code) {
	/* Common cases first */
	case XPT_ATA_IO:	/* Execute the requested I/O operation */
	case XPT_SCSI_IO:
		if (ata_check_ids(dev, ccb))
			return;
		/* Fail fast if no device is present at this target. */
		if ((ch->devices & ((ATA_ATA_MASTER | ATA_ATAPI_MASTER)
		    << ccb->ccb_h.target_id)) == 0) {
			ccb->ccb_h.status = CAM_SEL_TIMEOUT;
			break;
		}
		if (ch->running)
			device_printf(dev, "already running!\n");
		/*
		 * Handle a soft-reset control command inline by faking the
		 * post-reset signature (ATA vs ATAPI) in the result.
		 */
		if (ccb->ccb_h.func_code == XPT_ATA_IO &&
		    (ccb->ataio.cmd.flags & CAM_ATAIO_CONTROL) &&
		    (ccb->ataio.cmd.control & ATA_A_RESET)) {
			struct ata_res *res = &ccb->ataio.res;

			bzero(res, sizeof(*res));
			if (ch->devices & (ATA_ATA_MASTER << ccb->ccb_h.target_id)) {
				res->lba_high = 0;
				res->lba_mid = 0;
			} else {
				/* ATAPI signature */
				res->lba_high = 0xeb;
				res->lba_mid = 0x14;
			}
			ccb->ccb_h.status = CAM_REQ_CMP;
			break;
		}
		/* Completion is signalled by the transaction code. */
		ata_cam_begin_transaction(dev, ccb);
		return;
	case XPT_EN_LUN:		/* Enable LUN as a target */
	case XPT_TARGET_IO:		/* Execute target I/O request */
	case XPT_ACCEPT_TARGET_IO:	/* Accept Host Target Mode CDB */
	case XPT_CONT_TARGET_IO:	/* Continue Host Target I/O Connection*/
	case XPT_ABORT:			/* Abort the specified CCB */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_SET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		/* CURRENT settings are applied to hardware; USER are saved. */
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		if (ch->flags & ATA_SATA) {
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_REVISION)
				d->revision = cts->xport_specific.sata.revision;
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					/* Controller may clamp the mode. */
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.sata.mode);
				} else
					d->mode = cts->xport_specific.sata.mode;
			}
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_BYTECOUNT)
				d->bytecount = min(8192, cts->xport_specific.sata.bytecount);
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.sata.atapi;
			if (cts->xport_specific.sata.valid & CTS_SATA_VALID_CAPS)
				d->caps = cts->xport_specific.sata.caps;
		} else {
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_MODE) {
				if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
					d->mode = ATA_SETMODE(ch->dev,
					    ccb->ccb_h.target_id,
					    cts->xport_specific.ata.mode);
				} else
					d->mode = cts->xport_specific.ata.mode;
			}
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_BYTECOUNT)
				d->bytecount = cts->xport_specific.ata.bytecount;
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_ATAPI)
				d->atapi = cts->xport_specific.ata.atapi;
			if (cts->xport_specific.ata.valid & CTS_ATA_VALID_CAPS)
				d->caps = cts->xport_specific.ata.caps;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_GET_TRAN_SETTINGS:
	{
		struct ccb_trans_settings *cts = &ccb->cts;
		struct ata_cam_device *d;

		if (ata_check_ids(dev, ccb))
			return;
		if (cts->type == CTS_TYPE_CURRENT_SETTINGS)
			d = &ch->curr[ccb->ccb_h.target_id];
		else
			d = &ch->user[ccb->ccb_h.target_id];
		cts->protocol = PROTO_UNSPECIFIED;
		cts->protocol_version = PROTO_VERSION_UNSPECIFIED;
		if (ch->flags & ATA_SATA) {
			cts->transport = XPORT_SATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.sata.valid = 0;
			cts->xport_specific.sata.mode = d->mode;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_MODE;
			cts->xport_specific.sata.bytecount = d->bytecount;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				/* Live revision from the controller; 0xff
				 * means unknown and is not reported. */
				cts->xport_specific.sata.revision =
				    ATA_GETREV(dev, ccb->ccb_h.target_id);
				if (cts->xport_specific.sata.revision != 0xff) {
					cts->xport_specific.sata.valid |=
					    CTS_SATA_VALID_REVISION;
				}
				cts->xport_specific.sata.caps =
				    d->caps & CTS_SATA_CAPS_D;
				if (ch->pm_level) {
					cts->xport_specific.sata.caps |=
					    CTS_SATA_CAPS_H_PMREQ;
				}
				/* Never report more than the user allows. */
				cts->xport_specific.sata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else {
				cts->xport_specific.sata.revision = d->revision;
				cts->xport_specific.sata.valid |= CTS_SATA_VALID_REVISION;
				cts->xport_specific.sata.caps = d->caps;
			}
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_CAPS;
			cts->xport_specific.sata.atapi = d->atapi;
			cts->xport_specific.sata.valid |= CTS_SATA_VALID_ATAPI;
		} else {
			cts->transport = XPORT_ATA;
			cts->transport_version = XPORT_VERSION_UNSPECIFIED;
			cts->xport_specific.ata.valid = 0;
			cts->xport_specific.ata.mode = d->mode;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_MODE;
			cts->xport_specific.ata.bytecount = d->bytecount;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_BYTECOUNT;
			if (cts->type == CTS_TYPE_CURRENT_SETTINGS) {
				cts->xport_specific.ata.caps =
				    d->caps & CTS_ATA_CAPS_D;
				if (!(ch->flags & ATA_NO_48BIT_DMA))
					cts->xport_specific.ata.caps |=
					    CTS_ATA_CAPS_H_DMA48;
				cts->xport_specific.ata.caps &=
				    ch->user[ccb->ccb_h.target_id].caps;
			} else
				cts->xport_specific.ata.caps = d->caps;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_CAPS;
			cts->xport_specific.ata.atapi = d->atapi;
			cts->xport_specific.ata.valid |= CTS_ATA_VALID_ATAPI;
		}
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	case XPT_RESET_BUS:		/* Reset the specified SCSI bus */
	case XPT_RESET_DEV:	/* Bus Device Reset the specified SCSI device */
		ata_reinit(dev);
		ccb->ccb_h.status = CAM_REQ_CMP;
		break;
	case XPT_TERM_IO:		/* Terminate the I/O process */
		/* XXX Implement */
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	case XPT_PATH_INQ:		/* Path routing inquiry */
	{
		struct ccb_pathinq *cpi = &ccb->cpi;

		parent = device_get_parent(dev);
		cpi->version_num = 1; /* XXX??? */
		cpi->hba_inquiry = PI_SDTR_ABLE;
		cpi->target_sprt = 0;
		cpi->hba_misc = PIM_SEQSCAN;
		cpi->hba_eng_cnt = 0;
		if (ch->flags & ATA_NO_SLAVE)
			cpi->max_target = 0;
		else
			cpi->max_target = 1;
		cpi->max_lun = 0;
		cpi->initiator_id = 0;
		cpi->bus_id = cam_sim_bus(sim);
		if (ch->flags & ATA_SATA)
			cpi->base_transfer_speed = 150000;
		else
			cpi->base_transfer_speed = 3300;
		strncpy(cpi->sim_vid, "FreeBSD", SIM_IDLEN);
		strncpy(cpi->hba_vid, "ATA", HBA_IDLEN);
		strncpy(cpi->dev_name, cam_sim_name(sim), DEV_IDLEN);
		cpi->unit_number = cam_sim_unit(sim);
		if (ch->flags & ATA_SATA)
			cpi->transport = XPORT_SATA;
		else
			cpi->transport = XPORT_ATA;
		cpi->transport_version = XPORT_VERSION_UNSPECIFIED;
		cpi->protocol = PROTO_ATA;
		cpi->protocol_version = PROTO_VERSION_UNSPECIFIED;
		cpi->maxio = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
		/* Only report PCI IDs when the grandparent is a PCI bus. */
		if (device_get_devclass(device_get_parent(parent)) ==
		    devclass_find("pci")) {
			cpi->hba_vendor = pci_get_vendor(parent);
			cpi->hba_device = pci_get_device(parent);
			cpi->hba_subvendor = pci_get_subvendor(parent);
			cpi->hba_subdevice = pci_get_subdevice(parent);
		}
		cpi->ccb_h.status = CAM_REQ_CMP;
		break;
	}
	default:
		ccb->ccb_h.status = CAM_REQ_INVALID;
		break;
	}
	xpt_done(ccb);
}
/*
 * Extended ioctl handler for the QLogic control device node.
 *
 * Dispatches register/flash/off-chip memory access requests, firmware
 * minidump retrieval, driver state/slowpath log export, and PCI ID
 * queries.  All request/response payloads arrive through @p data.
 * Returns 0 on success or an errno; unknown commands return 0.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	int count;

	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;

	/* The payload type depends on the ioctl command. */
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev= ha->pci_dev;

	switch(cmd) {
	case QLA_RDWR_REG:
		/* Direct register access, or indirect via firmware. */
		u.rv = (qla_reg_val_t *)data;

		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg, &u.rv->val,
			    u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		/* Flash reads require a valid flash descriptor table. */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		/* Refuse to write flash while the interface is running. */
		ifp = ha->ifp;
		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:
		/* Same preconditions as a flash write. */
		ifp = ha->ifp;
		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}

		if (ifp->if_drv_flags & IFF_DRV_RUNNING) {
			rval = ENXIO;
			break;
		}

		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}

		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off, u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		u.mem = (qla_offchip_mem_val_t *)data;

		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val, u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}
		break;

	case QLA_RD_FW_DUMP_SIZE:
		/* Report how big a buffer QLA_RD_FW_DUMP will need. */
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->minidump_size = ha->hw.mdump_buffer_size +
			ha->hw.mdump_template_size;
		fw_dump->pci_func = ha->pci_func;
		break;

	case QLA_RD_FW_DUMP:
		if (ha->hw.mdump_init == 0) {
			device_printf(pci_dev,
				"%s: minidump not initialized\n", __func__);
			rval = EINVAL;
			break;
		}

		fw_dump = (qla_rd_fw_dump_t *)data;

		/* Caller must supply a buffer of exactly the reported size. */
		if ((fw_dump->minidump == NULL) ||
			(fw_dump->minidump_size != (ha->hw.mdump_buffer_size +
				ha->hw.mdump_template_size))) {
			device_printf(pci_dev,
				"%s: minidump buffer [%p] size = [%d, %d] invalid\n", __func__,
				fw_dump->minidump, fw_dump->minidump_size,
				(ha->hw.mdump_buffer_size + ha->hw.mdump_template_size));
			rval = EINVAL;
			break;
		}

		if ((ha->pci_func & 0x1)) {
			device_printf(pci_dev,
				"%s: mindump allowed only on Port0\n", __func__);
			rval = ENXIO;
			break;
		}

		fw_dump->saved = 1;

		if (ha->offline) {
			/* Port is offline: take the dump synchronously. */
			if (ha->enable_minidump)
				ql_minidump(ha);

			fw_dump->saved = 0;
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
					"%s: port offline minidump failed\n", __func__);
				rval = ENXIO;
				break;
			}
		} else {
			/*
			 * Port is online: trigger recovery (which collects
			 * the minidump), then poll for completion.
			 */
#define QLA_LOCK_MDUMP_MS_TIMEOUT (QLA_LOCK_DEFAULT_MS_TIMEOUT * 5)
			if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				if (!ha->hw.mdump_done) {
					fw_dump->saved = 0;
					QL_INITIATE_RECOVERY(ha);
					device_printf(pci_dev, "%s: recovery initiated "
						" to trigger minidump\n",
						__func__);
				}
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev, "%s: QLA_LOCK() failed0\n", __func__);
				rval = ENXIO;
				break;
			}

#define QLNX_DUMP_WAIT_SECS	30
			count = QLNX_DUMP_WAIT_SECS * 1000;

			while (count) {
				if (ha->hw.mdump_done)
					break;
				qla_mdelay(__func__, 100);
				count -= 100;
			}

			if (!ha->hw.mdump_done) {
				device_printf(pci_dev,
					"%s: port not offline minidump failed\n", __func__);
				rval = ENXIO;
				break;
			}
			fw_dump->usec_ts = ha->hw.mdump_usec_ts;

			/* Re-arm so a future dump can be collected. */
			if (QLA_LOCK(ha, __func__, QLA_LOCK_MDUMP_MS_TIMEOUT, 0) == 0) {
				ha->hw.mdump_done = 0;
				QLA_UNLOCK(ha, __func__);
			} else {
				device_printf(pci_dev, "%s: QLA_LOCK() failed1\n",
					__func__);
				rval = ENXIO;
				break;
			}
		}

		/* Copy template followed by dump data to userland. */
		if ((rval = copyout(ha->hw.mdump_template,
			fw_dump->minidump, ha->hw.mdump_template_size))) {
			device_printf(pci_dev, "%s: template copyout failed\n", __func__);
			rval = ENXIO;
			break;
		}

		if ((rval = copyout(ha->hw.mdump_buffer,
				((uint8_t *)fw_dump->minidump +
					ha->hw.mdump_template_size),
				ha->hw.mdump_buffer_size))) {
			device_printf(pci_dev, "%s: minidump copyout failed\n", __func__);
			rval = ENXIO;
		}
		break;

	case QLA_RD_DRVR_STATE:
		rval = ql_drvr_state(ha, (qla_driver_state_t *)data);
		break;

	case QLA_RD_SLOWPATH_LOG:
		rval = ql_slowpath_log(ha, (qla_sp_log_t *)data);
		break;

	case QLA_RD_PCI_IDS:
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		break;
	}

	return rval;
}
/*
 * generic PCI ATA device probe
 *
 * Accepts only PCI storage-class devices.  SATA-subclass devices are
 * tried against the AHCI identify routine first; otherwise the chipset
 * is identified by a per-vendor routine, and unknown IDE-subclass
 * devices fall back to the generic driver unless blacklisted in
 * none_atapci_table.
 */
int
ata_pci_probe(device_t dev)
{
	/* Per-vendor chipset identify routines. */
	static const struct {
		uint32_t	vendor;
		int		(*ident)(device_t);
	} vendor_idents[] = {
		{ ATA_ACARD_ID,		ata_acard_ident },
		{ ATA_ACER_LABS_ID,	ata_ali_ident },
		{ ATA_ADAPTEC_ID,	ata_adaptec_ident },
		{ ATA_AMD_ID,		ata_amd_ident },
		{ ATA_ATI_ID,		ata_ati_ident },
		{ ATA_CYRIX_ID,		ata_cyrix_ident },
		{ ATA_CYPRESS_ID,	ata_cypress_ident },
		{ ATA_HIGHPOINT_ID,	ata_highpoint_ident },
		{ ATA_INTEL_ID,		ata_intel_ident },
		{ ATA_ITE_ID,		ata_ite_ident },
		{ ATA_JMICRON_ID,	ata_jmicron_ident },
		{ ATA_MARVELL_ID,	ata_marvell_ident },
		{ ATA_NATIONAL_ID,	ata_national_ident },
		{ ATA_NETCELL_ID,	ata_netcell_ident },
		{ ATA_NVIDIA_ID,	ata_nvidia_ident },
		{ ATA_PROMISE_ID,	ata_promise_ident },
		{ ATA_SERVERWORKS_ID,	ata_serverworks_ident },
		{ ATA_SILICON_IMAGE_ID,	ata_sii_ident },
		{ ATA_SIS_ID,		ata_sis_ident },
		{ ATA_VIA_ID,		ata_via_ident },
		{ ATA_CENATEK_ID,	ata_cenatek_ident },
		{ ATA_MICRON_ID,	ata_micron_ident },
	};
	size_t i;
	uint16_t vendor;

	if (resource_disabled("atapci", device_get_unit(dev)))
		return (ENXIO);

	if (pci_get_class(dev) != PCIC_STORAGE)
		return ENXIO;

	/* if this is an AHCI chipset grab it */
	if (pci_get_subclass(dev) == PCIS_STORAGE_SATA) {
		if (!ata_ahci_ident(dev))
			return ATA_PROBE_OK;
	}

	/* run through the vendor specific drivers */
	vendor = pci_get_vendor(dev);
	for (i = 0; i < sizeof(vendor_idents) / sizeof(vendor_idents[0]); i++) {
		if (vendor_idents[i].vendor != vendor)
			continue;
		if (!vendor_idents[i].ident(dev))
			return ATA_PROBE_OK;
		break;
	}

	/* unknown chipset, try generic AHCI or DMA if it seems possible */
	if (pci_get_subclass(dev) == PCIS_STORAGE_IDE) {
		uint16_t device, subdevice, subvendor;
		const struct none_atapci *e;

		device = pci_get_device(dev);
		subvendor = pci_get_subvendor(dev);
		subdevice = pci_get_subdevice(dev);
		/* Devices on the blacklist are never attached. */
		for (e = none_atapci_table; e->vendor != 0xffff; ++e) {
			if (e->vendor == vendor && e->device == device &&
			    e->subvendor == subvendor &&
			    e->subdevice == subdevice)
				return ENXIO;
		}
		if (!ata_generic_ident(dev))
			return ATA_PROBE_OK;
	}
	return ENXIO;
}
/*
 * Character-device ioctl handler for the qla(4) host adapter.
 *
 * Supports direct/indirect register access, flash read/write/erase,
 * off-chip (MS) memory access, firmware minidump retrieval and a PCI
 * ID query.  Returns 0 on success, ENXIO/EIO/EINVAL on failure.
 */
static int
ql_eioctl(struct cdev *dev, u_long cmd, caddr_t data, int fflag,
	struct thread *td)
{
	qla_host_t *ha;
	int rval = 0;
	device_t pci_dev;
	struct ifnet *ifp;
	q80_offchip_mem_val_t val;
	qla_rd_pci_ids_t *pci_ids;
	qla_rd_fw_dump_t *fw_dump;
	/* One view of the ioctl payload per command type. */
	union {
		qla_reg_val_t *rv;
		qla_rd_flash_t *rdf;
		qla_wr_flash_t *wrf;
		qla_erase_flash_t *erf;
		qla_offchip_mem_val_t *mem;
	} u;

	/* si_drv1 is set at cdev creation; NULL means no attached host. */
	if ((ha = (qla_host_t *)dev->si_drv1) == NULL)
		return ENXIO;

	pci_dev= ha->pci_dev;

	switch(cmd) {
	case QLA_RDWR_REG:
		/* Register access: direct MMIO or indirect via helper. */
		u.rv = (qla_reg_val_t *)data;
		if (u.rv->direct) {
			if (u.rv->rd) {
				u.rv->val = READ_REG32(ha, u.rv->reg);
			} else {
				WRITE_REG32(ha, u.rv->reg, u.rv->val);
			}
		} else {
			if ((rval = ql_rdwr_indreg32(ha, u.rv->reg,
			    &u.rv->val, u.rv->rd)))
				rval = ENXIO;
		}
		break;

	case QLA_RD_FLASH:
		/* Flash reads require a valid flash descriptor table. */
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}
		u.rdf = (qla_rd_flash_t *)data;
		if ((rval = ql_rd_flash32(ha, u.rdf->off, &u.rdf->data)))
			rval = ENXIO;
		break;

	case QLA_WR_FLASH:
		/*
		 * Flash writes are refused while the interface exists and
		 * is active/running, and require a valid FDT.
		 */
		ifp = ha->ifp;
		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}
		if (ifp->if_drv_flags & (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)) {
			rval = ENXIO;
			break;
		}
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}
		u.wrf = (qla_wr_flash_t *)data;
		if ((rval = ql_wr_flash_buffer(ha, u.wrf->off, u.wrf->size,
		    u.wrf->buffer))) {
			printf("flash write failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_ERASE_FLASH:
		/* Same preconditions as a flash write. */
		ifp = ha->ifp;
		if (ifp == NULL) {
			rval = ENXIO;
			break;
		}
		if (ifp->if_drv_flags & (IFF_DRV_OACTIVE | IFF_DRV_RUNNING)) {
			rval = ENXIO;
			break;
		}
		if (!ha->hw.flags.fdt_valid) {
			rval = EIO;
			break;
		}
		u.erf = (qla_erase_flash_t *)data;
		if ((rval = ql_erase_flash(ha, u.erf->off, u.erf->size))) {
			printf("flash erase failed[%d]\n", rval);
			rval = ENXIO;
		}
		break;

	case QLA_RDWR_MS_MEM:
		/*
		 * Off-chip memory access goes through a local scratch value;
		 * results are copied back into the user-visible structure
		 * only on success.
		 */
		u.mem = (qla_offchip_mem_val_t *)data;
		if ((rval = ql_rdwr_offchip_mem(ha, u.mem->off, &val,
		    u.mem->rd)))
			rval = ENXIO;
		else {
			u.mem->data_lo = val.data_lo;
			u.mem->data_hi = val.data_hi;
			u.mem->data_ulo = val.data_ulo;
			u.mem->data_uhi = val.data_uhi;
		}
		break;

	case QLA_RD_FW_DUMP_SIZE:
		/* Report the minidump template size and PCI function. */
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}
		fw_dump = (qla_rd_fw_dump_t *)data;
		fw_dump->template_size = ha->hw.dma_buf.minidump.size;
		fw_dump->pci_func = ha->pci_func;
		break;

	case QLA_RD_FW_DUMP:
		/*
		 * Copy the minidump out to the caller's buffer; the caller
		 * must have sized it via QLA_RD_FW_DUMP_SIZE first.
		 */
		if (ha->hw.mdump_init == 0) {
			rval = EINVAL;
			break;
		}
		fw_dump = (qla_rd_fw_dump_t *)data;
		if ((fw_dump->md_template == NULL) ||
		    (fw_dump->template_size != ha->hw.dma_buf.minidump.size)) {
			rval = EINVAL;
			break;
		}
		if ((rval = copyout(ha->hw.dma_buf.minidump.dma_b,
		    fw_dump->md_template, fw_dump->template_size)))
			rval = ENXIO;
		break;

	case QLA_RD_PCI_IDS:
		/* Return the adapter's PCI identification registers. */
		pci_ids = (qla_rd_pci_ids_t *)data;
		pci_ids->ven_id = pci_get_vendor(pci_dev);
		pci_ids->dev_id = pci_get_device(pci_dev);
		pci_ids->subsys_ven_id = pci_get_subvendor(pci_dev);
		pci_ids->subsys_dev_id = pci_get_subdevice(pci_dev);
		pci_ids->rev_id = pci_read_config(pci_dev, PCIR_REVID, 1);
		break;

	default:
		/*
		 * NOTE(review): unknown commands fall through and return 0
		 * (rval's initial value); returning ENOTTY/EINVAL would be
		 * more conventional — confirm no caller depends on this.
		 */
		break;
	}

	return rval;
}
static int nm_pci_probe(device_t dev) { struct sc_info *sc = NULL; char *s = NULL; u_int32_t subdev, i; subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev); switch (pci_get_devid(dev)) { case NM256AV_PCI_ID: i = 0; while ((i < NUM_BADCARDS) && (badcards[i] != subdev)) i++; /* Try to catch other non-ac97 cards */ if (i == NUM_BADCARDS) { if (!(sc = malloc(sizeof(*sc), M_DEVBUF, M_NOWAIT | M_ZERO))) { device_printf(dev, "cannot allocate softc\n"); return ENXIO; } sc->regid = PCIR_BAR(1); sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regid, RF_ACTIVE); if (!sc->reg) { device_printf(dev, "unable to map register space\n"); free(sc, M_DEVBUF); return ENXIO; } /* * My Panasonic CF-M2EV needs resetting device * before checking mixer is present or not. * [email protected]. */ nm_wr(sc, 0, 0x11, 1); /* reset device */ if ((nm_rd(sc, NM_MIXER_PRESENCE, 2) & NM_PRESENCE_MASK) != NM_PRESENCE_VALUE) { i = 0; /* non-ac97 card, but not listed */ DEB(device_printf(dev, "subdev = 0x%x - badcard?\n", subdev)); } bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg); free(sc, M_DEVBUF); } if (i == NUM_BADCARDS) s = "NeoMagic 256AV"; DEB(else) DEB(device_printf(dev, "this is a non-ac97 NM256AV, not attaching\n")); break; case NM256ZX_PCI_ID: s = "NeoMagic 256ZX"; break; } if (s) device_set_desc(dev, s); return s? 0 : ENXIO; }
/*
 * Newbus attach routine for the tws(4) (LSI 3ware 9750) controller.
 *
 * Initializes locks and sysctl nodes, maps BAR1 register space (and
 * the BAR2 MFA region when push mode is compiled in), sets up the
 * interrupt, creates the /dev/tws<unit> node, initializes the
 * controller, and attaches to CAM.  On failure, resources are torn
 * down via the attach_fail_* goto-cleanup chain in reverse order of
 * acquisition.  Returns 0 on success, ENXIO on failure.
 */
static int
tws_attach(device_t dev)
{
	struct tws_softc *sc = device_get_softc(dev);
	u_int32_t bar;
	int error=0,i;

	/* no tracing yet */
	/* Look up our softc and initialize its fields. */
	sc->tws_dev = dev;
	sc->device_id = pci_get_device(dev);
	sc->subvendor_id = pci_get_subvendor(dev);
	sc->subdevice_id = pci_get_subdevice(dev);

	/* Initialize mutexes; io_lock is recursive. */
	mtx_init( &sc->q_lock, "tws_q_lock", NULL, MTX_DEF);
	mtx_init( &sc->sim_lock, "tws_sim_lock", NULL, MTX_DEF);
	mtx_init( &sc->gen_lock, "tws_gen_lock", NULL, MTX_DEF);
	mtx_init( &sc->io_lock, "tws_io_lock", NULL, MTX_DEF | MTX_RECURSE);

	/* Trace-queue setup failure is non-fatal; just report it. */
	if ( tws_init_trace_q(sc) == FAILURE )
		printf("trace init failure\n");

	/* send init event */
	mtx_lock(&sc->gen_lock);
	tws_send_event(sc, TWS_INIT_START);
	mtx_unlock(&sc->gen_lock);

#if _BYTE_ORDER == _BIG_ENDIAN
	TWS_TRACE(sc, "BIG endian", 0, 0);
#endif

	/* sysctl context setup */
	sysctl_ctx_init(&sc->tws_clist);
	sc->tws_oidp = SYSCTL_ADD_NODE(&sc->tws_clist,
	    SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
	    device_get_nameunit(dev), CTLFLAG_RD, 0, "");
	if ( sc->tws_oidp == NULL ) {
		tws_log(sc, SYSCTL_TREE_NODE_ADD);
		goto attach_fail_1;
	}
	SYSCTL_ADD_STRING(&sc->tws_clist, SYSCTL_CHILDREN(sc->tws_oidp),
	    OID_AUTO, "driver_version", CTLFLAG_RD,
	    TWS_DRIVER_VERSION_STRING, 0, "TWS driver version");

	pci_enable_busmaster(dev);

	/* Read the BARs for trace output; BAR1 has flag bit 2 masked off. */
	bar = pci_read_config(dev, TWS_PCI_BAR0, 4);
	TWS_TRACE_DEBUG(sc, "bar0 ", bar, 0);
	bar = pci_read_config(dev, TWS_PCI_BAR1, 4);
	bar = bar & ~TWS_BIT2;
	TWS_TRACE_DEBUG(sc, "bar1 ", bar, 0);

	/*
	 * MFA base address is the BAR2 register, used for push mode.
	 * Firmware will eventually move to pull mode, during which this
	 * needs to change.
	 */
#ifndef TWS_PULL_MODE_ENABLE
	sc->mfa_base = (u_int64_t)pci_read_config(dev, TWS_PCI_BAR2, 4);
	sc->mfa_base = sc->mfa_base & ~TWS_BIT2;
	TWS_TRACE_DEBUG(sc, "bar2 ", sc->mfa_base, 0);
#endif

	/* allocate MMIO register space */
	sc->reg_res_id = TWS_PCI_BAR1; /* BAR1 offset */
	if ((sc->reg_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->reg_res_id), 0, ~0, 1, RF_ACTIVE)) == NULL) {
		tws_log(sc, ALLOC_MEMORY_RES);
		goto attach_fail_1;
	}
	sc->bus_tag = rman_get_bustag(sc->reg_res);
	sc->bus_handle = rman_get_bushandle(sc->reg_res);

#ifndef TWS_PULL_MODE_ENABLE
	/* Allocate bus space for inbound mfa */
	sc->mfa_res_id = TWS_PCI_BAR2; /* BAR2 offset */
	if ((sc->mfa_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &(sc->mfa_res_id), 0, ~0, 0x100000, RF_ACTIVE)) == NULL) {
		tws_log(sc, ALLOC_MEMORY_RES);
		goto attach_fail_2;
	}
	sc->bus_mfa_tag = rman_get_bustag(sc->mfa_res);
	sc->bus_mfa_handle = rman_get_bushandle(sc->mfa_res);
#endif

	/* Allocate and register our interrupt. */
	sc->intr_type = TWS_INTx; /* default */
	if ( tws_enable_msi )
		sc->intr_type = TWS_MSI;
	if ( tws_setup_irq(sc) == FAILURE ) {
		tws_log(sc, ALLOC_MEMORY_RES);
		goto attach_fail_3;
	}

	/*
	 * Create a /dev entry for this device. The kernel will assign us
	 * a major number automatically. We use the unit number of this
	 * device as the minor number and name the character device
	 * "tws<unit>".
	 */
	sc->tws_cdev = make_dev(&tws_cdevsw, device_get_unit(dev),
	    UID_ROOT, GID_OPERATOR, S_IRUSR | S_IWUSR, "tws%u",
	    device_get_unit(dev));
	sc->tws_cdev->si_drv1 = sc;

	if ( tws_init(sc) == FAILURE ) {
		tws_log(sc, TWS_INIT_FAILURE);
		goto attach_fail_4;
	}
	if ( tws_init_ctlr(sc) == FAILURE ) {
		tws_log(sc, TWS_CTLR_INIT_FAILURE);
		goto attach_fail_4;
	}
	if ((error = tws_cam_attach(sc))) {
		tws_log(sc, TWS_CAM_ATTACH);
		goto attach_fail_4;
	}

	/* send init complete event */
	mtx_lock(&sc->gen_lock);
	tws_send_event(sc, TWS_INIT_COMPLETE);
	mtx_unlock(&sc->gen_lock);

	TWS_TRACE_DEBUG(sc, "attached successfully", 0, sc->device_id);
	return(0);

	/* Failure unwinding — reverse order of acquisition. */
attach_fail_4:
	tws_teardown_intr(sc);
	destroy_dev(sc->tws_cdev);
attach_fail_3:
	/* Release any IRQ resources tws_setup_irq() left behind. */
	for(i=0;i<sc->irqs;i++) {
		if ( sc->irq_res[i] ){
			if (bus_release_resource(sc->tws_dev, SYS_RES_IRQ,
			    sc->irq_res_id[i], sc->irq_res[i]))
				TWS_TRACE(sc, "bus irq res", 0, 0);
		}
	}
#ifndef TWS_PULL_MODE_ENABLE
attach_fail_2:
#endif
	/*
	 * The softc is zero-initialized by newbus, so unallocated
	 * resource pointers are NULL and safely skipped here.
	 */
	if ( sc->mfa_res ){
		if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY,
		    sc->mfa_res_id, sc->mfa_res))
			TWS_TRACE(sc, "bus release ", 0, sc->mfa_res_id);
	}
	if ( sc->reg_res ){
		if (bus_release_resource(sc->tws_dev, SYS_RES_MEMORY,
		    sc->reg_res_id, sc->reg_res))
			TWS_TRACE(sc, "bus release2 ", 0, sc->reg_res_id);
	}
attach_fail_1:
	mtx_destroy(&sc->q_lock);
	mtx_destroy(&sc->sim_lock);
	mtx_destroy(&sc->gen_lock);
	mtx_destroy(&sc->io_lock);
	sysctl_ctx_free(&sc->tws_clist);
	return (ENXIO);
}
/*
 * Newbus attach routine for the Yamaha DS-1 (YMF7xx) PCM driver.
 *
 * Allocates the softc, maps BAR0 register space, creates the buffer
 * DMA tag, initializes the chip, attaches the AC97 codec and mixer,
 * sets up the interrupt, and registers the PCM device with its play
 * and record channels.  All failures funnel through the single "bad:"
 * cleanup label.  Returns 0 on success, ENXIO on failure.
 */
static int
ds_pci_attach(device_t dev)
{
	u_int32_t subdev, i;
	struct sc_info *sc;
	struct ac97_info *codec = NULL;
	char status[SND_STATUSLEN];

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_ds1 softc");
	sc->dev = dev;
	/* Combined subdevice/subvendor ID used for model lookup. */
	subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev);
	sc->type = ds_finddev(pci_get_devid(dev), subdev);
	sc->rev = pci_get_revid(dev);

	pci_enable_busmaster(dev);

	/* Map the chip's register space (BAR0). */
	sc->regid = PCIR_BAR(0);
	sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regid,
	    RF_ACTIVE);
	if (!sc->reg) {
		device_printf(dev, "unable to map register space\n");
		goto bad;
	}
	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, DS1_BUFFSIZE, 65536);

	/* DMA tag for the audio buffers (32-bit addressable). */
	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev), /*alignment*/2,
	    /*boundary*/0, /*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
	    /*highaddr*/BUS_SPACE_MAXADDR, /*filter*/NULL, /*filterarg*/NULL,
	    /*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
	    /*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL,
	    &sc->buffer_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	sc->regbase = NULL;
	if (ds_init(sc) == -1) {
		device_printf(dev, "unable to initialize the card\n");
		goto bad;
	}

	codec = AC97_CREATE(dev, sc, ds_ac97);
	if (codec == NULL)
		goto bad;

	/*
	 * Turn on inverted external amplifier sense flags for few
	 * 'special' boards.
	 */
	switch (subdev) {
	case 0x81171033: /* NEC ValueStar (VT550/0) */
		ac97_setflags(codec, ac97_getflags(codec) | AC97_F_EAPD_INV);
		break;
	default:
		break;
	}

	mixer_init(dev, ac97_getmixerclass(), codec);

	/* Hook up the (shareable) interrupt. */
	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq || snd_setup_intr(dev, sc->irq, INTR_MPSAFE, ds_intr,
	    sc, &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	/*
	 * NOTE(review): %lx/%ld assume rman_get_start() returns a long-
	 * sized value; on platforms where rman_res_t is wider this format
	 * may mismatch — confirm against the target branch's rman(9).
	 */
	snprintf(status, SND_STATUSLEN, "at memory 0x%lx irq %ld %s",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_ds1));

	/* Register the PCM device and its play/record channels. */
	if (pcm_register(dev, sc, DS1_CHANS, 2))
		goto bad;
	for (i = 0; i < DS1_CHANS; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &ds1pchan_class, sc);
	for (i = 0; i < 2; i++)
		pcm_addchan(dev, PCMDIR_REC, &ds1rchan_class, sc);
	pcm_setstatus(dev, status);

	return 0;

bad:
	/* Unified cleanup: release whatever was acquired before failing. */
	if (codec)
		ac97_destroy(codec);
	if (sc->reg)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg);
	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
	if (sc->buffer_dmat)
		bus_dma_tag_destroy(sc->buffer_dmat);
	if (sc->control_dmat)
		bus_dma_tag_destroy(sc->control_dmat);
	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return ENXIO;
}