/*
 * Probe for a legacy VirtIO PCI device.  The device must carry the
 * VirtIO vendor ID, a device ID inside the legacy range, and the
 * supported ABI revision; the subdevice ID names the virtio device
 * type used to build the description string.
 */
static int
vtpci_probe(device_t dev)
{
	char description[36];
	const char *devname;
	uint16_t devid;

	if (pci_get_vendor(dev) != VIRTIO_PCI_VENDORID)
		return (ENXIO);

	devid = pci_get_device(dev);
	if (devid < VIRTIO_PCI_DEVICEID_MIN ||
	    devid > VIRTIO_PCI_DEVICEID_MAX)
		return (ENXIO);

	if (pci_get_revid(dev) != VIRTIO_PCI_ABI_VERSION)
		return (ENXIO);

	devname = virtio_device_name(pci_get_subdevice(dev));
	if (devname == NULL)
		devname = "Unknown";

	snprintf(description, sizeof(description), "VirtIO PCI %s adapter",
	    devname);
	device_set_desc_copy(dev, description);

	return (BUS_PROBE_DEFAULT);
}
/*
 * Apply the AMD SB600/SB700 EHCI quirk: set bit 3 of config register
 * 0x53 when it is clear.  For SB700 the quirk is restricted to SMBus
 * controller revisions 0x3a/0x3b.
 */
static void
ehci_pci_ati_quirk(device_t self, uint8_t is_sb700)
{
	device_t smbdev;
	uint32_t cfg;

	if (is_sb700) {
		/* Lookup SMBUS PCI device */
		smbdev = pci_find_device(PCI_EHCI_VENDORID_ATI, 0x4385);
		if (smbdev == NULL)
			return;
		cfg = pci_get_revid(smbdev);
		/* Quirk only applies to these two SMBus revisions. */
		if (cfg != 0x3a && cfg != 0x3b)
			return;
	}

	/*
	 * Note: this bit is described as reserved in SB700
	 * Register Reference Guide.
	 */
	cfg = pci_read_config(self, 0x53, 1);
	if ((cfg & 0x8) == 0) {
		pci_write_config(self, 0x53, cfg | 0x8, 1);
		device_printf(self, "AMD SB600/700 quirk applied\n");
	}
}
/*
 * Probe for a PIIX4-compatible SMBus controller by PCI device ID.
 * SB800 and newer AMD parts (revision >= 0x40) are rejected because
 * they cannot be configured in a compatible way.
 */
static int
intsmb_probe(device_t dev)
{
	const char *desc;
	uint32_t devid;

	devid = pci_get_devid(dev);
	if (devid == 0x71138086 || devid == 0x719b8086) {
		/* Intel 82371AB / Intel 82443MX */
		desc = "Intel PIIX4 SMBUS Interface";
	} else if (devid == 0x43721002) {
		desc = "ATI IXP400 SMBus Controller";
	} else if (devid == 0x43851002) {
		/* SB800 and newer can not be configured in a compatible way. */
		if (pci_get_revid(dev) >= 0x40)
			return (ENXIO);
		desc = "AMD SB600/700/710/750 SMBus Controller";
		/* XXX Maybe force polling right here? */
	} else {
		/*
		 * ServerWorks OSB4 (0x02001166) is intentionally not
		 * matched: claiming it stops isab0 functioning.
		 */
		return (ENXIO);
	}

	device_set_desc(dev, desc);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Attach a HighPoint RAID adapter: collect its PCI identity, allocate
 * the HIM adapter handle, create the adapter instance and register it
 * with the LDM layer, creating a fresh virtual bus (VBUS) if the first
 * registration fails.  Returns 0 on success, ENXIO or -1 on failure.
 */
static int hpt_attach(device_t dev)
{
	PHBA hba = (PHBA)device_get_softc(dev);
	HIM *him = hba->ldm_adapter.him;
	PCI_ID pci_id;
	HPT_UINT size;
	PVBUS vbus;
	PVBUS_EXT vbus_ext;

	KdPrint(("hpt_attach(%d/%d/%d)", pci_get_bus(dev),
	    pci_get_slot(dev), pci_get_function(dev)));

	pci_enable_busmaster(dev);

	/* PCI identity; subsys packs subdevice (high 16) | subvendor (low). */
	pci_id.vid = pci_get_vendor(dev);
	pci_id.did = pci_get_device(dev);
	pci_id.rev = pci_get_revid(dev);
	pci_id.subsys = (HPT_U32)(pci_get_subdevice(dev)) << 16 |
	    pci_get_subvendor(dev);

	size = him->get_adapter_size(&pci_id);
	hba->ldm_adapter.him_handle = kmalloc(size, M_DEVBUF, M_WAITOK);
	/*
	 * NOTE(review): kmalloc with M_WAITOK normally cannot return NULL;
	 * the check below is kept as defensive coding.
	 */
	if (!hba->ldm_adapter.him_handle)
		return ENXIO;

	hba->pcidev = dev;
	hba->pciaddr.tree = 0;
	hba->pciaddr.bus = pci_get_bus(dev);
	hba->pciaddr.device = pci_get_slot(dev);
	hba->pciaddr.function = pci_get_function(dev);

	if (!him->create_adapter(&pci_id, hba->pciaddr,
	    hba->ldm_adapter.him_handle, hba)) {
		kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
		return -1;
	}

	os_printk("adapter at PCI %d:%d:%d, IRQ %d", hba->pciaddr.bus,
	    hba->pciaddr.device, hba->pciaddr.function, pci_get_irq(dev));

	/*
	 * The first registration fails when no VBUS exists yet; create one
	 * and register again.
	 */
	if (!ldm_register_adapter(&hba->ldm_adapter)) {
		size = ldm_get_vbus_size();
		vbus_ext = kmalloc(sizeof(VBUS_EXT) + size, M_DEVBUF,
		    M_WAITOK);
		if (!vbus_ext) {
			kfree(hba->ldm_adapter.him_handle, M_DEVBUF);
			return -1;
		}
		memset(vbus_ext, 0, sizeof(VBUS_EXT));
		vbus_ext->ext_type = EXT_TYPE_VBUS;
		ldm_create_vbus((PVBUS)vbus_ext->vbus, vbus_ext);
		/*
		 * NOTE(review): the result of this second registration is
		 * not checked -- presumably it cannot fail after a fresh
		 * VBUS was created; confirm against ldm_register_adapter().
		 */
		ldm_register_adapter(&hba->ldm_adapter);
	}

	/* Link this HBA onto the hba_list of the VBUS it was placed on. */
	ldm_for_each_vbus(vbus, vbus_ext) {
		if (hba->ldm_adapter.vbus==vbus) {
			hba->vbus_ext = vbus_ext;
			hba->next = vbus_ext->hba_list;
			vbus_ext->hba_list = hba;
			break;
		}
	}

	return 0;
}
/*
 * Probe for a PIIX4-compatible SMBus controller.  Sets the device
 * description for all supported device IDs; AMD FCH parts older than
 * revision 0x40 are rejected.
 */
static int
intsmb_probe(device_t dev)
{
	const char *desc = NULL;

	switch (pci_get_devid(dev)) {
	case 0x71138086:	/* Intel 82371AB */
	case 0x719b8086:	/* Intel 82443MX */
#if 0
	/* Not a good idea yet, this stops isab0 functioning */
	case 0x02001166:	/* ServerWorks OSB4 */
#endif
		desc = "Intel PIIX4 SMBUS Interface";
		break;
	case 0x43721002:
		desc = "ATI IXP400 SMBus Controller";
		break;
	case 0x43851002:
		desc = "AMD SB600/7xx/8xx/9xx SMBus Controller";
		break;
	case 0x780b1022:	/* AMD FCH */
		/* Only FCH revision 0x40 and newer are handled here. */
		if (pci_get_revid(dev) < 0x40)
			return (ENXIO);
		desc = "AMD FCH SMBus Controller";
		break;
	default:
		return (ENXIO);
	}

	device_set_desc(dev, desc);
	return (BUS_PROBE_DEFAULT);
}
/*
 * Configure watchdog tick granularity and map the control and count
 * registers.  The tick length depends on the southbridge generation,
 * which is detected via the SMBus controller's PCI revision ID.
 */
static int
amdsbwd_attach_sb(device_t dev, struct amdsbwd_softc *sc)
{
	device_t smbus;

	sc->max_ticks = UINT16_MAX;
	sc->rid_ctrl = 0;
	sc->rid_count = 1;

	smbus = pci_find_bsf(0, 20, 0);
	KASSERT(smbus != NULL, ("can't find SMBus PCI device\n"));
	/* SB7xx and older tick every 10 ms; SB8xx and newer every second. */
	sc->ms_per_tick =
	    (pci_get_revid(smbus) < AMDSB8_SMBUS_REVID) ? 10 : 1000;

	sc->res_ctrl = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->rid_ctrl, RF_ACTIVE);
	if (sc->res_ctrl == NULL) {
		device_printf(dev, "bus_alloc_resource for ctrl failed\n");
		return (ENXIO);
	}

	sc->res_count = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->rid_count, RF_ACTIVE);
	if (sc->res_count == NULL) {
		device_printf(dev, "bus_alloc_resource for count failed\n");
		return (ENXIO);
	}

	return (0);
}
/*
 * Probe and attach the card
 */
static int
via_probe(device_t dev)
{
	const char *desc;

	if (pci_get_devid(dev) != VIA8233_PCI_ID)
		return ENXIO;

	/* All revisions of the 8233 family are accepted. */
	switch (pci_get_revid(dev)) {
	case VIA8233_REV_ID_8233PRE:
		desc = "VIA VT8233 (pre)";
		break;
	case VIA8233_REV_ID_8233C:
		desc = "VIA VT8233C";
		break;
	case VIA8233_REV_ID_8233:
		desc = "VIA VT8233";
		break;
	case VIA8233_REV_ID_8233A:
		desc = "VIA VT8233A";
		break;
	case VIA8233_REV_ID_8235:
		desc = "VIA VT8235";
		break;
	case VIA8233_REV_ID_8237:
		desc = "VIA VT8237";
		break;
	case VIA8233_REV_ID_8251:
		desc = "VIA VT8251";
		break;
	default:
		desc = "VIA VT8233X";	/* Unknown */
		break;
	}

	device_set_desc(dev, desc);
	return BUS_PROBE_DEFAULT;
}
/*
 * Probe for AdvanSys narrow SCSI controllers.  The ASC3000 device ID
 * covers several chips distinguished by PCI revision; revisions above
 * the 3150 are reported as unknown AdvanSys parts.
 */
static int
adv_pci_probe(device_t dev)
{
	int rev = pci_get_revid(dev);
	const char *desc = NULL;

	switch (pci_get_devid(dev)) {
	case PCI_DEVICE_ID_ADVANSYS_1200A:
		desc = "AdvanSys ASC1200A SCSI controller";
		break;
	case PCI_DEVICE_ID_ADVANSYS_1200B:
		desc = "AdvanSys ASC1200B SCSI controller";
		break;
	case PCI_DEVICE_ID_ADVANSYS_3000:
		if (rev == PCI_DEVICE_REV_ADVANSYS_3150)
			desc = "AdvanSys ASC3150 SCSI controller";
		else if (rev == PCI_DEVICE_REV_ADVANSYS_3050)
			desc = "AdvanSys ASC3030/50 SCSI controller";
		else if (rev >= PCI_DEVICE_REV_ADVANSYS_3150)
			desc = "Unknown AdvanSys controller";
		break;
	default:
		break;
	}

	if (desc == NULL)
		return ENXIO;
	device_set_desc(dev, desc);
	return 0;
}
/*
 * Attach the Realtek RTW PCI wireless device: restore D0 power state
 * if needed, enable bus mastering, map the first register resource
 * that can be allocated (memory preferred over I/O port) and hand off
 * to the bus-independent attach code.  Returns 0 or ENXIO.
 */
static int
rtw_pci_attach(device_t dev)
{
	struct rtw_softc *sc = device_get_softc(dev);
	struct rtw_regs *regs = &sc->sc_regs;
	int i, error;

	/*
	 * No power management hooks.
	 * XXX Maybe we should add some!
	 */
	sc->sc_flags |= RTW_F_ENABLED;

	sc->sc_rev = pci_get_revid(dev);

#ifndef BURN_BRIDGES
	if (pci_get_powerstate(dev) != PCI_POWERSTATE_D0) {
		uint32_t mem, port, irq;

		/* Save BARs and interrupt line; a D-state transition can
		 * clear them, so restore after forcing D0. */
		mem = pci_read_config(dev, RTW_PCI_MMBA, 4);
		port = pci_read_config(dev, RTW_PCI_IOBA, 4);
		irq = pci_read_config(dev, PCIR_INTLINE, 4);

		device_printf(dev, "chip is in D%d power mode "
		    "-- setting to D0\n", pci_get_powerstate(dev));
		pci_set_powerstate(dev, PCI_POWERSTATE_D0);

		pci_write_config(dev, RTW_PCI_MMBA, mem, 4);
		pci_write_config(dev, RTW_PCI_IOBA, port, 4);
		pci_write_config(dev, PCIR_INTLINE, irq, 4);
	}
#endif	/* !BURN_BRIDGES */

	/* Enable PCI bus master */
	pci_enable_busmaster(dev);

	/* Allocate IO memory/port: first table entry that maps wins. */
	for (i = 0; i < NELEM(rtw_pci_regs); ++i) {
		regs->r_rid = rtw_pci_regs[i].reg_rid;
		regs->r_type = rtw_pci_regs[i].reg_type;
		regs->r_res = bus_alloc_resource_any(dev, regs->r_type,
		    &regs->r_rid, RF_ACTIVE);
		if (regs->r_res != NULL)
			break;
	}
	if (regs->r_res == NULL) {
		device_printf(dev, "can't allocate IO mem/port\n");
		return ENXIO;
	}
	regs->r_bh = rman_get_bushandle(regs->r_res);
	regs->r_bt = rman_get_bustag(regs->r_res);

	error = rtw_attach(dev);
	if (error)
		rtw_pci_detach(dev);
	return error;
}
/*
 * Attach an RME HDSPe card: select the channel map by PCI revision
 * (AIO or RayDAT), allocate hardware resources, initialize the chip
 * and create one pcm child per channel pair.  Returns the result of
 * bus_generic_attach() on success, ENXIO on failure.
 */
static int
hdspe_attach(device_t dev)
{
	struct sc_info *sc;
	struct sc_pcminfo *scp;
	struct hdspe_channel *chan_map;
	uint32_t rev;
	int i, err;

#if 0
	device_printf(dev, "hdspe_attach()\n");
#endif

	set_pci_config(dev);

	sc = device_get_softc(dev);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev),
	    "snd_hdspe softc");
	sc->dev = dev;

	/* The PCI revision ID distinguishes the AIO and RayDAT models. */
	rev = pci_get_revid(dev);
	switch (rev) {
	case PCI_REVISION_AIO:
		sc->type = AIO;
		chan_map = chan_map_aio;
		break;
	case PCI_REVISION_RAYDAT:
		sc->type = RAYDAT;
		chan_map = chan_map_rd;
		break;
	default:
		return ENXIO;
	}

	/* Allocate resources. */
	err = hdspe_alloc_resources(sc);
	if (err) {
		device_printf(dev, "Unable to allocate system resources.\n");
		return ENXIO;
	}

	if (hdspe_init(sc) != 0)
		return ENXIO;

	for (i = 0; i < HDSPE_MAX_CHANS && chan_map[i].descr != NULL; i++) {
		/*
		 * NOTE(review): malloc with M_NOWAIT may return NULL and the
		 * result is dereferenced unchecked below -- confirm whether
		 * an allocation-failure check should be added here.
		 */
		scp = malloc(sizeof(struct sc_pcminfo), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		scp->hc = &chan_map[i];
		scp->sc = sc;
		scp->dev = device_add_child(dev, "pcm", -1);
		device_set_ivars(scp->dev, scp);
	}

	hdspe_map_dmabuf(sc);

	return (bus_generic_attach(dev));
}
/*
 * Scan a zero-terminated chip-ID table for an entry matching this
 * device's PCI device ID with a revision no newer than the device's.
 * Returns the matching entry or NULL.
 */
const struct ata_chip_id *
ata_match_chip(device_t dev, const struct ata_chip_id *index)
{
	const struct ata_chip_id *entry;

	for (entry = index; entry->chipid != 0; entry++) {
		if (pci_get_devid(dev) == entry->chipid &&
		    pci_get_revid(dev) >= entry->chiprev)
			return entry;
	}
	return NULL;
}
/*
 * Probe for the AMD southbridge watchdog.  Reads the watchdog MMIO
 * base address through the indirect PMIO index/data port pair, then
 * publishes the control and count register windows as memory
 * resources for attach.  Returns 0 on success, ENXIO on failure.
 */
static int
amdsbwd_probe(device_t dev)
{
	struct resource *res;
	device_t smb_dev;
	uint32_t addr;
	int rid;
	int rc;

	/* Do not claim some ISA PnP device by accident. */
	if (isa_get_logicalid(dev) != 0)
		return (ENXIO);

	rc = bus_set_resource(dev, SYS_RES_IOPORT, 0, AMDSB_PMIO_INDEX,
	    AMDSB_PMIO_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for IO failed\n");
		return (ENXIO);
	}
	rid = 0;
	res = bus_alloc_resource(dev, SYS_RES_IOPORT, &rid, 0ul, ~0ul,
	    AMDSB_PMIO_WIDTH, RF_ACTIVE | RF_SHAREABLE);
	if (res == NULL) {
		device_printf(dev, "bus_alloc_resource for IO failed\n");
		return (ENXIO);
	}

	/* The SMBus controller's revision identifies the SB generation. */
	smb_dev = pci_find_bsf(0, 20, 0);
	KASSERT(smb_dev != NULL, ("can't find SMBus PCI device\n"));
	/*
	 * NOTE(review): amdsbwd_probe_sb7xx/sb8xx are assumed to always
	 * store a value through &addr -- confirm, otherwise addr below
	 * could be read uninitialized.
	 */
	if (pci_get_revid(smb_dev) < AMDSB8_SMBUS_REVID)
		amdsbwd_probe_sb7xx(dev, res, &addr);
	else
		amdsbwd_probe_sb8xx(dev, res, &addr);

	/* The PMIO ports are only needed during probe; release them. */
	bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
	bus_delete_resource(dev, SYS_RES_IOPORT, rid);

	amdsbwd_verbose_printf(dev, "memory base address = %#010x\n", addr);

	/* Publish the watchdog control and count registers for attach. */
	rc = bus_set_resource(dev, SYS_RES_MEMORY, 0, addr + AMDSB_WD_CTRL,
	    AMDSB_WDIO_REG_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for control failed\n");
		return (ENXIO);
	}
	rc = bus_set_resource(dev, SYS_RES_MEMORY, 1, addr + AMDSB_WD_COUNT,
	    AMDSB_WDIO_REG_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for count failed\n");
		return (ENXIO);
	}

	return (0);
}
/*
 * Enable bus mastering and I/O port decoding for the device.
 *
 * Bug fix: the previous code read the PCI *revision ID* register
 * (pci_get_revid) and OR'ed PCIM_CMD_PORTEN into that value before
 * writing it to PCIR_COMMAND, corrupting the command register with
 * revision bits.  Read-modify-write the command register instead.
 *
 * Always returns 0.
 */
static int
set_pci_config(device_t dev)
{
	uint32_t data;

	pci_enable_busmaster(dev);

	data = pci_read_config(dev, PCIR_COMMAND, 2);
	data |= PCIM_CMD_PORTEN;
	pci_write_config(dev, PCIR_COMMAND, data, 2);

	return 0;
}
/*
 * Apply the VIA VT6202 EHCI quirk (device 0x3104, revision 0x6x):
 * set bit 5 of config register 0x4b to correct the schedule sleep
 * time to 10us.
 *
 * Bug fix: the previous code read register 0x4b and, when bit 0x20
 * was clear, wrote the *unmodified* value back -- a no-op, so the
 * quirk never took effect.  Set the bit before writing.
 */
static void
ehci_pci_via_quirk(device_t self)
{
	uint32_t val;

	if ((pci_get_device(self) == 0x3104) &&
	    ((pci_get_revid(self) & 0xf0) == 0x60)) {
		/* Correct schedule sleep time to 10us */
		val = pci_read_config(self, 0x4b, 1);
		if (val & 0x20)
			return;		/* already applied */
		val |= 0x20;
		pci_write_config(self, 0x4b, val, 1);
		device_printf(self, "VIA-quirk applied\n");
	}
}
/*
 * Look up this device in a zero-terminated chip-ID table.  An entry
 * matches when its device ID equals the device's and its minimum
 * revision does not exceed the device's revision.
 */
const struct ata_chip_id *
ata_match_chip(device_t dev, const struct ata_chip_id *index)
{
	const uint32_t id = pci_get_devid(dev);
	const uint8_t rev = pci_get_revid(dev);

	for (; index->chipid != 0; index++) {
		if (index->chipid == id && index->chiprev <= rev)
			return (index);
	}
	return (NULL);
}
/*
 * Probe for an RME HDSPe card: Xilinx vendor/device ID plus a PCI
 * revision identifying either the AIO or the RayDAT model.
 */
static int
hdspe_probe(device_t dev)
{
	if (pci_get_vendor(dev) != PCI_VENDOR_XILINX ||
	    pci_get_device(dev) != PCI_DEVICE_XILINX_HDSPE)
		return (ENXIO);

	switch (pci_get_revid(dev)) {
	case PCI_REVISION_AIO:
		device_set_desc(dev, "RME HDSPe AIO");
		return 0;
	case PCI_REVISION_RAYDAT:
		device_set_desc(dev, "RME HDSPe RayDAT");
		return 0;
	default:
		return (ENXIO);
	}
}
/*
 * Attach the siba bridge behind a PCI BWN device: enable bus
 * mastering, map the register BAR and record the PCI identity
 * before handing off to the core attach code.
 */
static int
siba_bwn_attach(device_t dev)
{
	struct siba_bwn_softc *ssc = device_get_softc(dev);
	struct siba_softc *siba = &ssc->ssc_siba;

	siba->siba_dev = dev;
	siba->siba_type = SIBA_TYPE_PCI;

	/* Enable bus mastering. */
	pci_enable_busmaster(dev);

	/* Map the PCI register window. */
	siba->siba_mem_rid = SIBA_PCIR_BAR;
	siba->siba_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &siba->siba_mem_rid, RF_ACTIVE);
	if (siba->siba_mem_res == NULL) {
		device_printf(dev, "cannot map register space\n");
		return (ENXIO);
	}
	siba->siba_mem_bt = rman_get_bustag(siba->siba_mem_res);
	siba->siba_mem_bh = rman_get_bushandle(siba->siba_mem_res);

	/* Record the PCI identification for the core code. */
	siba->siba_pci_vid = pci_get_vendor(dev);
	siba->siba_pci_did = pci_get_device(dev);
	siba->siba_pci_subvid = pci_get_subvendor(dev);
	siba->siba_pci_subdid = pci_get_subdevice(dev);
	siba->siba_pci_revid = pci_get_revid(dev);

	return (siba_core_attach(siba));
}
/*
 * Search the siblings of this device for one matching an entry in a
 * zero-terminated chip-ID table.  A non-negative slot requires an
 * exact slot match; a negative slot accepts any slot up to -slot.
 * Returns the matching table entry or NULL.
 */
const struct ata_chip_id *
ata_find_chip(device_t dev, const struct ata_chip_id *index, int slot)
{
	const struct ata_chip_id *found = NULL;
	device_t *children;
	int nchildren, i;

	if (device_get_children(device_get_parent(dev), &children, &nchildren))
		return NULL;

	for (; index->chipid != 0 && found == NULL; index++) {
		for (i = 0; i < nchildren; i++) {
			device_t child = children[i];
			int child_slot = pci_get_slot(child);

			if (((slot >= 0 && child_slot == slot) ||
			     (slot < 0 && child_slot <= -slot)) &&
			    pci_get_devid(child) == index->chipid &&
			    pci_get_revid(child) >= index->chiprev) {
				found = index;
				break;
			}
		}
	}
	kfree(children, M_TEMP);
	return found;
}
/*
 * Attach a Yamaha DS-1 (YMF7xx) PCI sound device: map registers,
 * create the buffer DMA tag, initialize the chip, create the AC97
 * codec and mixer, hook up the interrupt and register the PCM
 * channels.  Returns 0 on success, ENXIO on failure with all
 * partially-acquired resources released.
 */
static int
ds_pci_attach(device_t dev)
{
	u_int32_t subdev, i;
	struct sc_info *sc;
	struct ac97_info *codec = NULL;
	char status[SND_STATUSLEN];

	sc = malloc(sizeof(*sc), M_DEVBUF, M_WAITOK | M_ZERO);
	sc->lock = snd_mtxcreate(device_get_nameunit(dev), "snd_ds1 softc");
	sc->dev = dev;
	/* Combined subsystem ID: subdevice in the high 16 bits. */
	subdev = (pci_get_subdevice(dev) << 16) | pci_get_subvendor(dev);
	sc->type = ds_finddev(pci_get_devid(dev), subdev);
	sc->rev = pci_get_revid(dev);

	pci_enable_busmaster(dev);

	sc->regid = PCIR_BAR(0);
	sc->reg = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &sc->regid,
	    RF_ACTIVE);
	if (!sc->reg) {
		device_printf(dev, "unable to map register space\n");
		goto bad;
	}

	sc->st = rman_get_bustag(sc->reg);
	sc->sh = rman_get_bushandle(sc->reg);

	sc->bufsz = pcm_getbuffersize(dev, 4096, DS1_BUFFSIZE, 65536);

	if (bus_dma_tag_create(/*parent*/bus_get_dma_tag(dev),
		/*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/sc->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
		/*flags*/0, /*lockfunc*/NULL, /*lockarg*/NULL,
		&sc->buffer_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	sc->regbase = NULL;
	if (ds_init(sc) == -1) {
		device_printf(dev, "unable to initialize the card\n");
		goto bad;
	}

	codec = AC97_CREATE(dev, sc, ds_ac97);
	if (codec == NULL)
		goto bad;
	/*
	 * Turn on inverted external amplifier sense flags for few
	 * 'special' boards.
	 */
	switch (subdev) {
	case 0x81171033:	/* NEC ValueStar (VT550/0) */
		ac97_setflags(codec, ac97_getflags(codec) | AC97_F_EAPD_INV);
		break;
	default:
		break;
	}
	mixer_init(dev, ac97_getmixerclass(), codec);

	sc->irqid = 0;
	sc->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &sc->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!sc->irq || snd_setup_intr(dev, sc->irq, INTR_MPSAFE, ds_intr,
	    sc, &sc->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	snprintf(status, SND_STATUSLEN, "at memory 0x%lx irq %ld %s",
	    rman_get_start(sc->reg), rman_get_start(sc->irq),
	    PCM_KLDSTRING(snd_ds1));

	if (pcm_register(dev, sc, DS1_CHANS, 2))
		goto bad;
	for (i = 0; i < DS1_CHANS; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &ds1pchan_class, sc);
	for (i = 0; i < 2; i++)
		pcm_addchan(dev, PCMDIR_REC, &ds1rchan_class, sc);
	pcm_setstatus(dev, status);

	return 0;

bad:
	/* Tear down everything acquired so far. */
	if (codec)
		ac97_destroy(codec);
	if (sc->reg)
		bus_release_resource(dev, SYS_RES_MEMORY, sc->regid, sc->reg);
	if (sc->ih)
		bus_teardown_intr(dev, sc->irq, sc->ih);
	if (sc->irq)
		bus_release_resource(dev, SYS_RES_IRQ, sc->irqid, sc->irq);
	if (sc->buffer_dmat)
		bus_dma_tag_destroy(sc->buffer_dmat);
	if (sc->control_dmat)
		bus_dma_tag_destroy(sc->control_dmat);
	if (sc->lock)
		snd_mtxfree(sc->lock);
	free(sc, M_DEVBUF);
	return ENXIO;
}
/*
 * Identify routine: add an "ichss" SpeedStep child to CPU 0 when a
 * supported Intel ICH2/3/4-M I/O controller hub is present and the
 * system is not on the 82815 host-bridge blacklist.
 */
static void
ichss_identify(driver_t *driver, device_t parent)
{
	device_t child;
	uint32_t pmbase;

	if (resource_disabled("ichss", 0))
		return;

	/*
	 * It appears that ICH SpeedStep only requires a single CPU to
	 * set the value (since the chipset is shared by all CPUs.)
	 * Thus, we only add a child to cpu 0.
	 */
	if (device_get_unit(parent) != 0)
		return;

	/* Avoid duplicates. */
	if (device_find_child(parent, "ichss", -1))
		return;

	/*
	 * ICH2/3/4-M I/O Controller Hub is at bus 0, slot 1F, function 0.
	 * E.g. see Section 6.1 "PCI Devices and Functions" and table 6.1 of
	 * Intel(r) 82801BA I/O Controller Hub 2 (ICH2) and Intel(r) 82801BAM
	 * I/O Controller Hub 2 Mobile (ICH2-M).
	 */
	ich_device = pci_find_bsf(0, 0x1f, 0);
	if (ich_device == NULL ||
	    pci_get_vendor(ich_device) != PCI_VENDOR_INTEL ||
	    (pci_get_device(ich_device) != PCI_DEV_82801BA &&
	    pci_get_device(ich_device) != PCI_DEV_82801CA &&
	    pci_get_device(ich_device) != PCI_DEV_82801DB))
		return;

	/*
	 * Certain systems with ICH2 and an Intel 82815_MC host bridge
	 * where the host bridge's revision is < 5 lockup if SpeedStep
	 * is used.
	 */
	if (pci_get_device(ich_device) == PCI_DEV_82801BA) {
		device_t hostb;

		hostb = pci_find_bsf(0, 0, 0);
		if (hostb != NULL &&
		    pci_get_vendor(hostb) == PCI_VENDOR_INTEL &&
		    pci_get_device(hostb) == PCI_DEV_82815_MC &&
		    pci_get_revid(hostb) < 5)
			return;
	}

	/* Find the PMBASE register from our PCI config header. */
	pmbase = pci_read_config(ich_device, ICHSS_PMBASE_OFFSET,
	    sizeof(pmbase));
	if ((pmbase & ICHSS_IO_REG) == 0) {
		printf("ichss: invalid PMBASE memory type\n");
		return;
	}
	pmbase &= ICHSS_PMBASE_MASK;
	if (pmbase == 0) {
		printf("ichss: invalid zero PMBASE address\n");
		return;
	}
	DPRINT("ichss: PMBASE is %#x\n", pmbase);

	child = BUS_ADD_CHILD(parent, 20, "ichss", 0);
	if (child == NULL) {
		device_printf(parent, "add SpeedStep child failed\n");
		return;
	}

	/* Add the bus master arbitration and control registers. */
	bus_set_resource(child, SYS_RES_IOPORT, 0,
	    pmbase + ICHSS_BM_OFFSET, 1);
	bus_set_resource(child, SYS_RES_IOPORT, 1,
	    pmbase + ICHSS_CTRL_OFFSET, 1);
}
/*
 * Attach the VIA VT8233/35/37/51 AC97 audio controller: allocate I/O
 * and IRQ resources, create the buffer and SGD-table DMA tags,
 * initialize the chip and codec, decide the DXS/SGD channel layout
 * (per-revision quirks and tunables) and register the PCM channels.
 * Returns 0 on success, ENXIO on failure with resources released.
 */
static int
via_attach(device_t dev)
{
	struct via_info *via = NULL;
	char status[SND_STATUSLEN];
	int i, via_dxs_disabled, via_dxs_src, via_dxs_chnum, via_sgd_chnum;
	uint32_t revid;

	via = kmalloc(sizeof *via, M_DEVBUF, M_WAITOK | M_ZERO);
	via->lock = snd_mtxcreate(device_get_nameunit(dev), "sound softc");

	pci_set_powerstate(dev, PCI_POWERSTATE_D0);
	pci_enable_busmaster(dev);

	via->regid = PCIR_BAR(0);
	via->reg = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &via->regid,
	    RF_ACTIVE);
	if (!via->reg) {
		device_printf(dev, "cannot allocate bus resource.");
		goto bad;
	}
	via->st = rman_get_bustag(via->reg);
	via->sh = rman_get_bushandle(via->reg);

	via->bufsz = pcm_getbuffersize(dev, 4096, VIA_DEFAULT_BUFSZ, 65536);

	via->irqid = 0;
	via->irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &via->irqid,
	    RF_ACTIVE | RF_SHAREABLE);
	if (!via->irq || snd_setup_intr(dev, via->irq, INTR_MPSAFE,
	    via_intr, via, &via->ih)) {
		device_printf(dev, "unable to map interrupt\n");
		goto bad;
	}

	/* DMA tag for buffers */
	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/via->bufsz, /*nsegments*/1, /*maxsegz*/0x3ffff,
		/*flags*/0, &via->parent_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	/*
	 * DMA tag for SGD table. The 686 uses scatter/gather DMA and
	 * requires a list in memory of work to do. We need only 16 bytes
	 * for this list, and it is wasteful to allocate 16K.
	 */
	if (bus_dma_tag_create(/*parent*/NULL, /*alignment*/2, /*boundary*/0,
		/*lowaddr*/BUS_SPACE_MAXADDR_32BIT,
		/*highaddr*/BUS_SPACE_MAXADDR,
		/*filter*/NULL, /*filterarg*/NULL,
		/*maxsize*/NSEGS * sizeof(struct via_dma_op),
		/*nsegments*/1, /*maxsegz*/0x3ffff,
		/*flags*/0, &via->sgd_dmat) != 0) {
		device_printf(dev, "unable to create dma tag\n");
		goto bad;
	}

	if (bus_dmamem_alloc(via->sgd_dmat, (void **)&via->sgd_table,
	    BUS_DMA_NOWAIT, &via->sgd_dmamap) == -1)
		goto bad;
	if (bus_dmamap_load(via->sgd_dmat, via->sgd_dmamap, via->sgd_table,
	    NSEGS * sizeof(struct via_dma_op), dma_cb, via, 0))
		goto bad;

	if (via_chip_init(dev))
		goto bad;

	via->codec = AC97_CREATE(dev, via, via_ac97);
	if (!via->codec)
		goto bad;

	mixer_init(dev, ac97_getmixerclass(), via->codec);

	via->codec_caps = ac97_getextcaps(via->codec);

	/* Try to set VRA without generating an error, VRM not reqrd yet */
	if (via->codec_caps &
	    (AC97_EXTCAP_VRA | AC97_EXTCAP_VRM | AC97_EXTCAP_DRA)) {
		u_int16_t ext = ac97_getextmode(via->codec);
		ext |= (via->codec_caps &
		    (AC97_EXTCAP_VRA | AC97_EXTCAP_VRM));
		ext &= ~AC97_EXTCAP_DRA;
		ac97_setextmode(via->codec, ext);
	}

	ksnprintf(status, SND_STATUSLEN, "at io 0x%lx irq %ld %s",
	    rman_get_start(via->reg), rman_get_start(via->irq),
	    PCM_KLDSTRING(snd_via8233));

	revid = pci_get_revid(dev);

	/*
	 * VIA8251 lost its interrupt after DMA EOL, and need
	 * a gentle spank on its face within interrupt handler.
	 */
	if (revid == VIA8233_REV_ID_8251)
		via->dma_eol_wake = 1;
	else
		via->dma_eol_wake = 0;

	/*
	 * Decide whether DXS had to be disabled or not
	 */
	if (revid == VIA8233_REV_ID_8233A) {
		/*
		 * DXS channel is disabled. Reports from multiple users
		 * that it plays at half-speed. Do not see this behaviour
		 * on available 8233C or when emulating 8233A register set
		 * on 8233C (either with or without ac97 VRA).
		 */
		via_dxs_disabled = 1;
	} else if (resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "via_dxs_disabled",
	    &via_dxs_disabled) == 0)
		via_dxs_disabled = (via_dxs_disabled > 0) ? 1 : 0;
	else
		via_dxs_disabled = 0;

	if (via_dxs_disabled) {
		via_dxs_chnum = 0;
		via_sgd_chnum = 1;
	} else {
		/* Channel counts are tunable, defaulting to the maxima. */
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "via_dxs_channels",
		    &via_dxs_chnum) != 0)
			via_dxs_chnum = NDXSCHANS;
		if (resource_int_value(device_get_name(dev),
		    device_get_unit(dev), "via_sgd_channels",
		    &via_sgd_chnum) != 0)
			via_sgd_chnum = NMSGDCHANS;
	}
	/* Clamp the requested counts to what the hardware supports. */
	if (via_dxs_chnum > NDXSCHANS)
		via_dxs_chnum = NDXSCHANS;
	else if (via_dxs_chnum < 0)
		via_dxs_chnum = 0;
	if (via_sgd_chnum > NMSGDCHANS)
		via_sgd_chnum = NMSGDCHANS;
	else if (via_sgd_chnum < 0)
		via_sgd_chnum = 0;
	if (via_dxs_chnum + via_sgd_chnum < 1) {
		/* Minimalist ? */
		via_dxs_chnum = 1;
		via_sgd_chnum = 0;
	}
	if (via_dxs_chnum > 0 && resource_int_value(device_get_name(dev),
	    device_get_unit(dev), "via_dxs_src", &via_dxs_src) == 0)
		via->dxs_src = (via_dxs_src > 0) ? 1 : 0;
	else
		via->dxs_src = 0;

	/* Register */
	if (pcm_register(dev, via, via_dxs_chnum + via_sgd_chnum, NWRCHANS))
		goto bad;
	for (i = 0; i < via_dxs_chnum; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &via8233dxs_class, via);
	for (i = 0; i < via_sgd_chnum; i++)
		pcm_addchan(dev, PCMDIR_PLAY, &via8233msgd_class, via);
	for (i = 0; i < NWRCHANS; i++)
		pcm_addchan(dev, PCMDIR_REC, &via8233wr_class, via);
	if (via_dxs_chnum > 0)
		via_init_sysctls(dev);
	device_printf(dev, "<VIA DXS %sabled: DXS%s %d / SGD %d / REC %d>\n",
	    (via_dxs_chnum > 0) ? "En" : "Dis",
	    (via->dxs_src) ? "(SRC)" : "",
	    via_dxs_chnum, via_sgd_chnum, NWRCHANS);

	pcm_setstatus(dev, status);

	return 0;
bad:
	/* Release everything acquired so far. */
	if (via->codec)
		ac97_destroy(via->codec);
	if (via->reg)
		bus_release_resource(dev, SYS_RES_IOPORT, via->regid,
		    via->reg);
	if (via->ih)
		bus_teardown_intr(dev, via->irq, via->ih);
	if (via->irq)
		bus_release_resource(dev, SYS_RES_IRQ, via->irqid, via->irq);
	if (via->parent_dmat)
		bus_dma_tag_destroy(via->parent_dmat);
	if (via->sgd_dmamap)
		bus_dmamap_unload(via->sgd_dmat, via->sgd_dmamap);
	if (via->sgd_table)
		bus_dmamem_free(via->sgd_dmat, via->sgd_table,
		    via->sgd_dmamap);
	if (via->sgd_dmat)
		bus_dma_tag_destroy(via->sgd_dmat);
	if (via->lock)
		snd_mtxfree(via->lock);
	if (via)
		kfree(via, M_DEVBUF);
	return ENXIO;
}
/*
 * Attach helper for AMD SB8xx/SB9xx/FCH SMBus controllers.  The SMBus
 * I/O base address is read through the indirect PMIO index/data pair;
 * the register layout depends on the device/revision generation.
 * Polling mode is forced.  Returns 0 on success, ENXIO on failure.
 *
 * Bug fix: the previous code checked the *stale* PMIO resource pointer
 * (`res`, already released) where it meant to validate the SMBus I/O
 * allocation, and never checked the result of bus_alloc_resource_any()
 * stored into sc->io_res.  Allocate first, then check sc->io_res.
 */
static int
sb8xx_attach(device_t dev)
{
	static const int	AMDSB_SMBIO_WIDTH = 0x14;
	struct intsmb_softc	*sc;
	struct resource		*res;
	uint32_t		devid;
	uint8_t			revid;
	uint16_t		addr;
	int			rid;
	int			rc;
	bool			enabled;

	sc = device_get_softc(dev);
	rid = 0;
	rc = bus_set_resource(dev, SYS_RES_IOPORT, rid, AMDSB_PMIO_INDEX,
	    AMDSB_PMIO_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for PM IO failed\n");
		return (ENXIO);
	}
	res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &rid, RF_ACTIVE);
	if (res == NULL) {
		device_printf(dev, "bus_alloc_resource for PM IO failed\n");
		return (ENXIO);
	}

	devid = pci_get_devid(dev);
	revid = pci_get_revid(dev);
	if (devid == AMDSB_SMBUS_DEVID ||
	    (devid == AMDFCH_SMBUS_DEVID && revid < AMDFCH41_SMBUS_REVID) ||
	    (devid == AMDCZ_SMBUS_DEVID && revid < AMDCZ49_SMBUS_REVID)) {
		/* SB8xx-style: 16-bit base + enable bit in one register. */
		addr = amd_pmio_read(res, AMDSB8_PM_SMBUS_EN + 1);
		addr <<= 8;
		addr |= amd_pmio_read(res, AMDSB8_PM_SMBUS_EN);
		enabled = (addr & AMDSB8_SMBUS_EN) != 0;
		addr &= AMDSB8_SMBUS_ADDR_MASK;
	} else {
		/* FCH 4.1+: enable bit and base high byte are separate. */
		addr = amd_pmio_read(res, AMDFCH41_PM_DECODE_EN0);
		enabled = (addr & AMDFCH41_SMBUS_EN) != 0;
		addr = amd_pmio_read(res, AMDFCH41_PM_DECODE_EN1);
		addr <<= 8;
	}

	/* The PMIO ports are only needed briefly; release them now. */
	bus_release_resource(dev, SYS_RES_IOPORT, rid, res);
	bus_delete_resource(dev, SYS_RES_IOPORT, rid);

	if (!enabled) {
		device_printf(dev, "SB8xx/SB9xx/FCH SMBus not enabled\n");
		return (ENXIO);
	}

	sc->io_rid = 0;
	rc = bus_set_resource(dev, SYS_RES_IOPORT, sc->io_rid, addr,
	    AMDSB_SMBIO_WIDTH);
	if (rc != 0) {
		device_printf(dev, "bus_set_resource for SMBus IO failed\n");
		return (ENXIO);
	}
	sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
	    RF_ACTIVE);
	if (sc->io_res == NULL) {
		device_printf(dev, "bus_alloc_resource for SMBus IO failed\n");
		return (ENXIO);
	}

	/* These controllers are always operated in polling mode. */
	sc->poll = 1;
	return (0);
}
/*
 * Attach the PIIX4-compatible SMBus controller: route the interrupt
 * (or fall back to polling), allocate I/O and IRQ resources and
 * attach the smbus child bus.  AMD SB8xx and newer parts are handed
 * to sb8xx_attach() and always run in polling mode.
 */
static int
intsmb_attach(device_t dev)
{
	struct intsmb_softc *sc = device_get_softc(dev);
	int error, rid, value;
	int intr;
	char *str;

	sc->dev = dev;

	mtx_init(&sc->lock, device_get_nameunit(dev), "intsmb", MTX_DEF);

	sc->cfg_irq9 = 0;
	switch (pci_get_devid(dev)) {
#ifndef NO_CHANGE_PCICONF
	case 0x71138086:	/* Intel 82371AB */
	case 0x719b8086:	/* Intel 82443MX */
		/* Changing configuration is allowed. */
		sc->cfg_irq9 = 1;
		break;
#endif
	case 0x43851002:
	case 0x780b1022:
		/* SB8xx/FCH parts use the indirect PMIO attach path. */
		if (pci_get_revid(dev) >= 0x40)
			sc->sb8xx = 1;
		break;
	}

	if (sc->sb8xx) {
		error = sb8xx_attach(dev);
		if (error != 0)
			goto fail;
		else
			goto no_intr;
	}

	sc->io_rid = PCI_BASE_ADDR_SMB;
	sc->io_res = bus_alloc_resource_any(dev, SYS_RES_IOPORT, &sc->io_rid,
	    RF_ACTIVE);
	if (sc->io_res == NULL) {
		device_printf(dev, "Could not allocate I/O space\n");
		error = ENXIO;
		goto fail;
	}

	/* Optionally reconfigure the chip to deliver IRQ 9. */
	if (sc->cfg_irq9) {
		pci_write_config(dev, PCIR_INTLINE, 0x9, 1);
		pci_write_config(dev, PCI_HST_CFG_SMB,
		    PCI_INTR_SMB_IRQ9 | PCI_INTR_SMB_ENABLE, 1);
	}
	value = pci_read_config(dev, PCI_HST_CFG_SMB, 1);
	sc->poll = (value & PCI_INTR_SMB_ENABLE) == 0;
	intr = value & PCI_INTR_SMB_MASK;
	switch (intr) {
	case PCI_INTR_SMB_SMI:
		str = "SMI";
		break;
	case PCI_INTR_SMB_IRQ9:
		str = "IRQ 9";
		break;
	case PCI_INTR_SMB_IRQ_PCI:
		str = "PCI IRQ";
		break;
	default:
		str = "BOGUS";
	}

	device_printf(dev, "intr %s %s ", str,
	    sc->poll == 0 ? "enabled" : "disabled");
	printf("revision %d\n", pci_read_config(dev, PCI_REVID_SMB, 1));

	/* SMI delivery cannot be handled as a kernel interrupt. */
	if (!sc->poll && intr == PCI_INTR_SMB_SMI) {
		device_printf(dev,
		    "using polling mode when configured interrupt is SMI\n");
		sc->poll = 1;
	}

	if (sc->poll)
		goto no_intr;

	if (intr != PCI_INTR_SMB_IRQ9 && intr != PCI_INTR_SMB_IRQ_PCI) {
		device_printf(dev, "Unsupported interrupt mode\n");
		error = ENXIO;
		goto fail;
	}

	/* Force IRQ 9. */
	rid = 0;
	if (sc->cfg_irq9)
		bus_set_resource(dev, SYS_RES_IRQ, rid, 9, 1);

	sc->irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->irq_res == NULL) {
		device_printf(dev, "Could not allocate irq\n");
		error = ENXIO;
		goto fail;
	}

	error = bus_setup_intr(dev, sc->irq_res,
	    INTR_TYPE_MISC | INTR_MPSAFE, NULL, intsmb_rawintr, sc,
	    &sc->irq_hand);
	if (error) {
		device_printf(dev, "Failed to map intr\n");
		goto fail;
	}

no_intr:
	sc->isbusy = 0;
	sc->smbus = device_add_child(dev, "smbus", -1);
	if (sc->smbus == NULL) {
		error = ENXIO;
		goto fail;
	}
	error = device_probe_and_attach(sc->smbus);
	if (error)
		goto fail;

#ifdef ENABLE_ALART
	/* Enable Arart */
	bus_write_1(sc->io_res, PIIX4_SMBSLVCNT, PIIX4_SMBSLVCNT_ALTEN);
#endif
	return (0);

fail:
	intsmb_detach(dev);
	return (error);
}
/*
 * Attach a PCI graphics framebuffer device: map the framebuffer
 * memory and interrupt, register a new video adapter (or re-find an
 * already-registered one), allocate palette storage and initialize
 * the framebuffer.  Returns 0 on success, ENXIO on failure.
 */
int
pcigfb_attach(device_t dev)
{
	int s;
	gfb_softc_t sc;
	video_adapter_t *adp;
	int unit, flags, error, rid, va_index;
#ifdef __alpha__
	struct ctb *ctb;
#endif /* __alpha__ */

	s = splimp();
	error = 0;
	unit = device_get_unit(dev);
	flags = device_get_flags(dev);
	sc = device_get_softc(dev);
	sc->rev = pci_get_revid(dev);
	rid = GFB_MEM_BASE_RID;
	sc->res = bus_alloc_resource(dev, SYS_RES_MEMORY, &rid, 0, ~0, 1,
	    RF_ACTIVE|PCI_RF_DENSE);
	if(sc->res == NULL) {
		device_printf(dev, "couldn't map memory\n");
		goto fail;
	}
	sc->btag = rman_get_bustag(sc->res);
	sc->bhandle = rman_get_bushandle(sc->res);

	/* Allocate interrupt (irq)... */
	rid = 0x0;
	sc->irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid, 0, ~0, 1,
	    RF_SHAREABLE|RF_ACTIVE);
	if(sc->irq == NULL) {
		device_printf(dev, "Couldn't map interrupt\n");
		goto fail;
	}
	if((va_index = vid_find_adapter(sc->driver_name, unit)) < 0) {
		/*
		 * First time this adapter is seen: register a new
		 * video_adapter and initialize it.
		 * NOTE(review): the M_NOWAIT allocations below are used
		 * without NULL checks -- confirm this is acceptable here.
		 */
		sc->adp = (video_adapter_t *)malloc(sizeof(video_adapter_t),
		    M_DEVBUF, M_NOWAIT);
		adp = sc->adp;
		bzero(adp, sizeof(video_adapter_t));
		vid_init_struct(adp, sc->driver_name, sc->type, unit);
		if(vid_register(adp) < 0) {
			free(sc->adp, M_DEVBUF);
			goto fail;
		}
		adp->va_flags |= V_ADP_REGISTERED;
		adp->va_model = sc->model;
		adp->va_mem_base = (vm_offset_t)rman_get_virtual(sc->res);
		adp->va_mem_size = rman_get_end(sc->res) -
		    rman_get_start(sc->res);
		adp->va_io_base = NULL;
		adp->va_io_size = 0;
		adp->va_crtc_addr = NULL;
		gfb_device_softcs[sc->model][unit] = sc;
		sc->gfbc = (struct gfb_conf *)malloc(sizeof(struct gfb_conf),
		    M_DEVBUF, M_NOWAIT);
		bzero(sc->gfbc, sizeof(struct gfb_conf));
		if((*vidsw[adp->va_index]->init)(unit, adp, flags)) {
			device_printf(dev, "Couldn't initialize adapter\n");
			vid_unregister(adp);
			gfb_device_softcs[sc->model][unit] = NULL;
			free(sc->gfbc, M_DEVBUF);
			free(sc->adp, M_DEVBUF);
			goto fail;
		}
		/* Palette sizes come from the adapter init above. */
		sc->gfbc->palette.red =
		    (u_char *)malloc(sc->gfbc->palette.count, M_DEVBUF,
		    M_NOWAIT);
		sc->gfbc->palette.green =
		    (u_char *)malloc(sc->gfbc->palette.count, M_DEVBUF,
		    M_NOWAIT);
		sc->gfbc->palette.blue =
		    (u_char *)malloc(sc->gfbc->palette.count, M_DEVBUF,
		    M_NOWAIT);
		sc->gfbc->cursor_palette.red =
		    (u_char *)malloc(sc->gfbc->cursor_palette.count,
		    M_DEVBUF, M_NOWAIT);
		sc->gfbc->cursor_palette.green =
		    (u_char *)malloc(sc->gfbc->cursor_palette.count,
		    M_DEVBUF, M_NOWAIT);
		sc->gfbc->cursor_palette.blue =
		    (u_char *)malloc(sc->gfbc->cursor_palette.count,
		    M_DEVBUF, M_NOWAIT);
		if(gfb_init(unit, adp, flags)) {
			device_printf(dev, "Couldn't initialize framebuffer\n");
			vid_unregister(adp);
			gfb_device_softcs[sc->model][unit] = NULL;
			free(sc->gfbc->cursor_palette.blue, M_DEVBUF);
			free(sc->gfbc->cursor_palette.green, M_DEVBUF);
			free(sc->gfbc->cursor_palette.red, M_DEVBUF);
			free(sc->gfbc->palette.blue, M_DEVBUF);
			free(sc->gfbc->palette.green, M_DEVBUF);
			free(sc->gfbc->palette.red, M_DEVBUF);
			free(sc->gfbc, M_DEVBUF);
			free(sc->adp, M_DEVBUF);
			goto fail;
		}
	} else {
		/* Adapter already registered; re-probe and share state. */
		(*vidsw[va_index]->probe)(unit, &adp, (void *)sc->driver_name,
		    flags);
		sc->adp = adp;
		sc->gfbc = gfb_device_softcs[sc->model][unit]->gfbc;
		gfb_device_softcs[sc->model][unit] = sc;
	}

	/*
	 * This is a back-door for PCI devices--since FreeBSD no longer
	 * supports PCI configuration-space accesses during the *configure()
	 * phase for video adapters, we cannot identify a PCI device as the
	 * console during the first call to sccnattach().  There must be a
	 * second chance for PCI adapters to be recognized as the console,
	 * and this is it...
	 */
#ifdef __alpha__
	ctb = (struct ctb *)(((caddr_t)hwrpb) + hwrpb->rpb_ctb_off);
	if (ctb->ctb_term_type == 3) /* Display adapter */
		sccnattach();
#endif /* __alpha__ */
	device_printf(dev, "Board type %s\n", sc->gfbc->name);
	device_printf(dev, "%d x %d, %dbpp, %s RAMDAC\n",
	    sc->adp->va_info.vi_width, sc->adp->va_info.vi_height,
	    sc->adp->va_info.vi_depth, sc->gfbc->ramdac_name);
#ifdef FB_INSTALL_CDEV
	/* attach a virtual frame buffer device */
	error = fb_attach(makedev(0, unit), sc->adp, sc->cdevsw);
	if(error)
		goto fail;
	if(bootverbose)
		(*vidsw[sc->adp->va_index]->diag)(sc->adp, bootverbose);
#if experimental
	device_add_child(dev, "fb", -1);
	bus_generic_attach(dev);
#endif /*experimental*/
#endif /*FB_INSTALL_CDEV*/
	goto done;

fail:
	/* Undo partial setup; resources released in reverse order. */
	if(sc->intrhand != NULL) {
		bus_teardown_intr(dev, sc->irq, sc->intrhand);
		sc->intrhand = NULL;
	}
	if(sc->irq != NULL) {
		rid = 0x0;
		bus_release_resource(dev, SYS_RES_IRQ, rid, sc->irq);
		sc->irq = NULL;
	}
	if(sc->res != NULL) {
		rid = GFB_MEM_BASE_RID;
		bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res);
		sc->res = NULL;
	}
	error = ENXIO;
done:
	splx(s);
	return(error);
}
static int acpi_cpu_quirks(void) { device_t acpi_dev; uint32_t val; ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__); /* * Bus mastering arbitration control is needed to keep caches coherent * while sleeping in C3. If it's not present but a working flush cache * instruction is present, flush the caches before entering C3 instead. * Otherwise, just disable C3 completely. */ if (AcpiGbl_FADT.Pm2ControlBlock == 0 || AcpiGbl_FADT.Pm2ControlLength == 0) { if ((AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD) && (AcpiGbl_FADT.Flags & ACPI_FADT_WBINVD_FLUSH) == 0) { cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: no BM control, using flush cache method\n")); } else { cpu_quirks |= CPU_QUIRK_NO_C3; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: no BM control, C3 not available\n")); } } /* * If we are using generic Cx mode, C3 on multiple CPUs requires using * the expensive flush cache instruction. */ if (cpu_cx_generic && mp_ncpus > 1) { cpu_quirks |= CPU_QUIRK_NO_BM_CTRL; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: SMP, using flush cache mode for C3\n")); } /* Look for various quirks of the PIIX4 part. */ acpi_dev = pci_find_device(PCI_VENDOR_INTEL, PCI_DEVICE_82371AB_3); if (acpi_dev != NULL) { switch (pci_get_revid(acpi_dev)) { /* * Disable C3 support for all PIIX4 chipsets. Some of these parts * do not report the BMIDE status to the BM status register and * others have a livelock bug if Type-F DMA is enabled. Linux * works around the BMIDE bug by reading the BM status directly * but we take the simpler approach of disabling C3 for these * parts. * * See erratum #18 ("C3 Power State/BMIDE and Type-F DMA * Livelock") from the January 2002 PIIX4 specification update. * Applies to all PIIX4 models. * * Also, make sure that all interrupts cause a "Stop Break" * event to exit from C2 state. * Also, BRLD_EN_BM (ACPI_BITREG_BUS_MASTER_RLD in ACPI-speak) * should be set to zero, otherwise it causes C2 to short-sleep. 
* PIIX4 doesn't properly support C3 and bus master activity * need not break out of C2. */ case PCI_REVISION_A_STEP: case PCI_REVISION_B_STEP: case PCI_REVISION_4E: case PCI_REVISION_4M: cpu_quirks |= CPU_QUIRK_NO_C3; ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: working around PIIX4 bug, disabling C3\n")); val = pci_read_config(acpi_dev, PIIX4_DEVACTB_REG, 4); if ((val & PIIX4_STOP_BREAK_MASK) != PIIX4_STOP_BREAK_MASK) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: PIIX4: enabling IRQs to generate Stop Break\n")); val |= PIIX4_STOP_BREAK_MASK; pci_write_config(acpi_dev, PIIX4_DEVACTB_REG, val, 4); } AcpiReadBitRegister(ACPI_BITREG_BUS_MASTER_RLD, &val); if (val) { ACPI_DEBUG_PRINT((ACPI_DB_INFO, "acpi_cpu: PIIX4: reset BRLD_EN_BM\n")); AcpiWriteBitRegister(ACPI_BITREG_BUS_MASTER_RLD, 0); } break; default: break; } } return (0); }
/*
 * PCI front-end attach for the bwi(4) wireless driver: map the register
 * BAR, hook the interrupt line, capture PCI identity fields into the
 * softc, then hand off to the bus-independent bwi_attach().  On any
 * failure the resources acquired so far are released in reverse order
 * and an errno value is returned; 0 on success.
 */
static int
bwi_pci_attach(device_t dev)
{
	struct bwi_pci_softc *psc = device_get_softc(dev);
	struct bwi_softc *sc = &psc->sc_sc;
	int error = ENXIO;

	sc->sc_dev = dev;

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	sc->sc_mem_rid = BWI_PCIR_BAR;
	sc->sc_mem_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
	    &sc->sc_mem_rid, RF_ACTIVE);
	if (sc->sc_mem_res == NULL) {
		device_printf(dev, "cannot map register space\n");
		goto bad;
	}
	sc->sc_mem_bt = rman_get_bustag(sc->sc_mem_res);
	sc->sc_mem_bh = rman_get_bushandle(sc->sc_mem_res);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the card is setup are discarded.
	 */
	sc->sc_invalid = 1;

	/*
	 * Arrange interrupt line.
	 */
	sc->sc_irq_rid = 0;
	sc->sc_irq_res = bus_alloc_resource_any(dev, SYS_RES_IRQ,
	    &sc->sc_irq_rid, RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_irq_res == NULL) {
		device_printf(dev, "could not map interrupt\n");
		goto bad1;
	}
	if (bus_setup_intr(dev, sc->sc_irq_res, INTR_TYPE_NET | INTR_MPSAFE,
	    NULL, bwi_intr, sc, &sc->sc_irq_handle)) {
		device_printf(dev, "could not establish interrupt\n");
		goto bad2;
	}

	/* Get more PCI information */
	sc->sc_pci_did = pci_get_device(dev);
	sc->sc_pci_revid = pci_get_revid(dev);
	sc->sc_pci_subvid = pci_get_subvendor(dev);
	sc->sc_pci_subdid = pci_get_subdevice(dev);

	error = bwi_attach(sc);
	if (error == 0)			/* success */
		return (0);

	bus_teardown_intr(dev, sc->sc_irq_res, sc->sc_irq_handle);
bad2:
	/*
	 * Release with the same rids the resources were allocated with;
	 * the original code passed a literal 0 for the IRQ and BS_BAR (a
	 * constant belonging to a different driver) for the memory BAR.
	 */
	bus_release_resource(dev, SYS_RES_IRQ, sc->sc_irq_rid,
	    sc->sc_irq_res);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, sc->sc_mem_rid,
	    sc->sc_mem_res);
bad:
	return (error);
}