/*
 * sii3114_chip_map:
 *
 *	Map and set up a Silicon Image SiI3114 4-channel SATALink
 *	controller.  All register access on this chip goes through the
 *	BA5 register window, so the generic pciide register-mapping
 *	helpers cannot be used; we map BA5 ourselves and use the
 *	sii3114-specific mapreg/mapchan helpers instead.
 */
static void
sii3114_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	pcireg_t scs_cmd;
	pci_intr_handle_t intrhandle;
	const char *intrstr;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

	/*
	 * Reset bits for every functional unit: PCI bus master, arbiter,
	 * all four FIFOs and all four IDE channels.
	 */
#define SII3114_RESET_BITS						\
	(SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET |			\
	 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET |			\
	 SCS_CMD_FF3_RESET | SCS_CMD_FF2_RESET |			\
	 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET |			\
	 SCS_CMD_IDE3_RESET | SCS_CMD_IDE2_RESET)

	/*
	 * Reset everything and then unblock all of the interrupts.
	 */
	scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD);
	pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
	    scs_cmd | SII3114_RESET_BITS);
	delay(50 * 1000);
	/* Deassert the resets, preserving only the 66MHz-capable bit. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
	    scs_cmd & SCS_CMD_M66EN);
	delay(50 * 1000);

	/*
	 * On the 3114, the BA5 register space is always enabled.  In
	 * order to use the 3114 in any sane way, we must use this BA5
	 * register space, and so we consider it an error if we cannot
	 * map it.
	 *
	 * As a consequence of using BA5, our register mapping is different
	 * from a normal PCI IDE controller's, and so we are unable to use
	 * most of the common PCI IDE register mapping functions.
	 */
	if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, NULL) != 0) {
		aprint_error("%s: unable to map SATALink BA5 "
		    "register space\n", sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
	sc->sc_ba5_en = 1;

	aprint_verbose("%s: %dMHz PCI bus\n",
	    sc->sc_wdcdev.sc_dev.dv_xname,
	    (scs_cmd & SCS_CMD_M66EN) ? 66 : 33);

	/*
	 * Set the Interrupt Steering bit in the IDEDMA_CMD register of
	 * channel 2.  This is required at all times for proper operation
	 * when using the BA5 register space (otherwise interrupts from
	 * all 4 channels won't work).
	 */
	BA5_WRITE_4(sc, 2, ba5_IDEDMA_CMD, IDEDMA_CMD_INT_STEER);

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	sii3114_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/* Advertise PIO 4, and DMA/UDMA capabilities when DMA mapped OK. */
	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	/* Per-channel mode setup is shared with the 3112. */
	sc->sc_wdcdev.set_modes = sii3112_setup_channel;

	/* We can use SControl and SStatus to probe for drives. */
	sc->sc_wdcdev.drv_probe = sii3112_drv_probe;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = 4;

	/* Map and establish the interrupt handler. */
	if (pci_intr_map(pa, &intrhandle) != 0) {
		aprint_error("%s: couldn't map native-PCI interrupt\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, intrhandle);
	sc->sc_pci_ih = pci_intr_establish(pa->pa_pc, intrhandle, IPL_BIO,
	    /* XXX */
	    pciide_pci_intr, sc);
	if (sc->sc_pci_ih != NULL) {
		aprint_normal("%s: using %s for native-PCI interrupt\n",
		    sc->sc_wdcdev.sc_dev.dv_xname,
		    intrstr ? intrstr : "unknown interrupt");
	} else {
		aprint_error("%s: couldn't establish native-PCI interrupt",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}

	/* Set up and map each channel that responds to probing. */
	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (sii3114_chansetup(sc, channel) == 0)
			continue;
		sii3114_mapchan(cp);
	}
}
void mpi_pci_attach(struct device *parent, struct device *self, void *aux) { struct mpi_pci_softc *psc = (void *)self; struct mpi_softc *sc = &psc->psc_mpi; struct pci_attach_args *pa = aux; pcireg_t memtype; int r; pci_intr_handle_t ih; const char *intrstr; psc->psc_pc = pa->pa_pc; psc->psc_tag = pa->pa_tag; psc->psc_ih = NULL; sc->sc_dmat = pa->pa_dmat; sc->sc_ios = 0; /* find the appropriate memory base */ for (r = PCI_MAPREG_START; r < PCI_MAPREG_END; r += sizeof(memtype)) { memtype = pci_mapreg_type(psc->psc_pc, psc->psc_tag, r); if ((memtype & PCI_MAPREG_TYPE_MASK) == PCI_MAPREG_TYPE_MEM) break; } if (r >= PCI_MAPREG_END) { printf(": unable to locate system interface registers\n"); return; } if (pci_mapreg_map(pa, r, memtype, 0, &sc->sc_iot, &sc->sc_ioh, NULL, &sc->sc_ios, 0) != 0) { printf(": unable to map system interface registers\n"); return; } /* disable the expansion rom */ PWRITE(psc, PCI_ROM_REG, PREAD(psc, PCI_ROM_REG) & ~PCI_ROM_ENABLE); /* hook up the interrupt */ if (pci_intr_map(pa, &ih)) { printf(": unable to map interrupt\n"); goto unmap; } intrstr = pci_intr_string(psc->psc_pc, ih); psc->psc_ih = pci_intr_establish(psc->psc_pc, ih, IPL_BIO, mpi_intr, sc, sc->sc_dev.dv_xname); if (psc->psc_ih == NULL) { printf(": unable to map interrupt%s%s\n", intrstr == NULL ? "" : " at ", intrstr == NULL ? "" : intrstr); goto unmap; } printf(": %s", intrstr); if (pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_ID_REG) == PCI_ID_CODE(PCI_VENDOR_SYMBIOS, PCI_PRODUCT_SYMBIOS_1030)) sc->sc_flags |= MPI_F_SPI; if (mpi_attach(sc) != 0) { /* error printed by mpi_attach */ goto deintr; } return; deintr: pci_intr_disestablish(psc->psc_pc, psc->psc_ih); psc->psc_ih = NULL; unmap: bus_space_unmap(sc->sc_iot, sc->sc_ioh, sc->sc_ios); sc->sc_ios = 0; }
/*
 * pci_probe_device:
 *
 *	Probe the PCI device at `tag'.  When `match' is NULL, attach a
 *	child driver via config_found_sm_loc() and record its config
 *	state; otherwise, call `match' with the constructed attach args
 *	and, on a positive match, copy the args out through `pap'.
 *	Returns non-zero on a successful match or attach.
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(const struct pci_attach_args *),
    struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, /* csr, */ pciclass, intr, bhlcr, bar, endbar;
#ifdef __HAVE_PCI_MSI_MSIX
	pcireg_t cap;
	int off;
#endif
	int ret, pin, bus, device, function, i, width;
	int locs[PCICF_NLOCS];

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function).c_dev != NULL && !match)
		return 0;

	/* Header types above 2 (CardBus bridge) are unknown; skip. */
	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return 0;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	/* csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); */
	pciclass = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return 0;
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return 0;

	/* Collect memory range info */
	memset(sc->PCI_SC_DEVICESC(device, function).c_range, 0,
	    sizeof(sc->PCI_SC_DEVICESC(device, function).c_range));
	i = 0;
	/* Bridges have fewer BARs; bound the scan by header type. */
	switch (PCI_HDRTYPE_TYPE(bhlcr)) {
	case PCI_HDRTYPE_PPB:
		endbar = PCI_MAPREG_PPB_END;
		break;
	case PCI_HDRTYPE_PCB:
		endbar = PCI_MAPREG_PCB_END;
		break;
	default:
		endbar = PCI_MAPREG_END;
		break;
	}
	for (bar = PCI_MAPREG_START; bar < endbar; bar += width) {
		struct pci_range *r;
		pcireg_t type;

		/* A 64-bit memory BAR consumes two registers (width 8). */
		width = 4;
		if (pci_mapreg_probe(pc, tag, bar, &type) == 0)
			continue;

		if (PCI_MAPREG_TYPE(type) == PCI_MAPREG_TYPE_MEM) {
			if (PCI_MAPREG_MEM_TYPE(type) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				width = 8;

			r = &sc->PCI_SC_DEVICESC(device,
			    function).c_range[i++];
			if (pci_mapreg_info(pc, tag, bar, type,
			    &r->r_offset, &r->r_size, &r->r_flags) != 0)
				break;
			/*
			 * ATI device, BAR 0, exactly 16MB: treated as a
			 * mach64 frame-buffer aperture below.
			 */
			if ((PCI_VENDOR(id) == PCI_VENDOR_ATI) &&
			    (bar == 0x10) && (r->r_size == 0x1000000)) {
				struct pci_range *nr;
				/*
				 * this has to be a mach64
				 * split things up so each half-aperture can
				 * be mapped PREFETCHABLE except the last page
				 * which may contain registers
				 */
				r->r_size = 0x7ff000;
				r->r_flags = BUS_SPACE_MAP_LINEAR |
				    BUS_SPACE_MAP_PREFETCHABLE;
				nr = &sc->PCI_SC_DEVICESC(device,
				    function).c_range[i++];
				nr->r_offset = r->r_offset + 0x800000;
				nr->r_size = 0x7ff000;
				nr->r_flags = BUS_SPACE_MAP_LINEAR |
				    BUS_SPACE_MAP_PREFETCHABLE;
			}
		}
	}

	/* Build the attach args handed to the child driver / matcher. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = pciclass;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0) {
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);
	}

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin = /* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

#ifdef __HAVE_PCI_MSI_MSIX
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSIMAP, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			uint64_t addr;
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_LO);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR) {
				pa.pa_flags |= PCI_FLAGS_MSI_OKAY;
				pa.pa_flags |= PCI_FLAGS_MSIX_OKAY;
			}
		}
	}
#endif

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		struct pci_child *c;
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		c = &sc->PCI_SC_DEVICESC(device, function);
		/* Save config space and power state so detach can restore. */
		pci_conf_capture(pc, tag, &c->c_conf);
		if (pci_get_powerstate(pc, tag, &c->c_powerstate) == 0)
			c->c_psok = true;
		else
			c->c_psok = false;
		c->c_dev = config_found_sm_loc(sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);

		ret = (c->c_dev != NULL);
	}

	return ret;
}
/*
 * auacer_attach:
 *
 *	Attach an Acer Labs M5455 AC'97 audio controller: map I/O space,
 *	enable bus mastering, establish the interrupt, attach the AC'97
 *	codec, build the supported-format list and register with the
 *	audio layer.
 */
static void
auacer_attach(struct device *parent, struct device *self, void *aux)
{
	struct auacer_softc *sc;
	struct pci_attach_args *pa;
	pci_intr_handle_t ih;
	bus_size_t aud_size;
	pcireg_t v;
	const char *intrstr;
	int i;

	sc = (struct auacer_softc *)self;
	pa = aux;

	aprint_normal(": Acer Labs M5455 Audio controller\n");

	if (pci_mapreg_map(pa, 0x10, PCI_MAPREG_TYPE_IO, 0, &sc->iot,
	    &sc->aud_ioh, NULL, &aud_size)) {
		aprint_error(": can't map i/o space\n");
		return;
	}

	sc->sc_pc = pa->pa_pc;
	sc->sc_pt = pa->pa_tag;
	sc->dmat = pa->pa_dmat;
	sc->sc_dmamap_flags = BUS_DMA_COHERENT;	/* XXX remove */

	/* enable bus mastering */
	v = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    v | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_AUDIO,
	    auacer_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev, "can't establish interrupt");
		if (intrstr != NULL)
			aprint_normal(" at %s", intrstr);
		aprint_normal("\n");
		return;
	}

	aprint_normal_dev(&sc->sc_dev, "interrupting at %s\n", intrstr);

	/* Fill in the audio-device identification strings. */
	strlcpy(sc->sc_audev.name, "M5455 AC97", MAX_AUDIO_DEV_LEN);
	snprintf(sc->sc_audev.version, MAX_AUDIO_DEV_LEN,
	    "0x%02x", PCI_REVISION(pa->pa_class));
	strlcpy(sc->sc_audev.config, device_xname(&sc->sc_dev),
	    MAX_AUDIO_DEV_LEN);

	/* Set up DMA lists. */
	auacer_alloc_cdata(sc);
	sc->sc_pcmo.dmalist = sc->sc_cdata->ic_dmalist_pcmo;
	sc->sc_pcmo.ptr = 0;
	sc->sc_pcmo.port = ALI_BASE_PO;

	DPRINTF(ALI_DEBUG_DMA, ("auacer_attach: lists %p\n",
	    sc->sc_pcmo.dmalist));

	/* Hook up the AC'97 host interface callbacks. */
	sc->host_if.arg = sc;
	sc->host_if.attach = auacer_attach_codec;
	sc->host_if.read = auacer_read_codec;
	sc->host_if.write = auacer_write_codec;
	sc->host_if.reset = auacer_reset_codec;

	if (ac97_attach(&sc->host_if, self) != 0)
		return;

	/* setup audio_format */
	memcpy(sc->sc_formats, auacer_formats, sizeof(auacer_formats));
	/* Drop multichannel formats the codec cannot do. */
	if (!AC97_IS_4CH(sc->codec_if))
		AUFMT_INVALIDATE(&sc->sc_formats[AUACER_FORMATS_4CH]);
	if (!AC97_IS_6CH(sc->codec_if))
		AUFMT_INVALIDATE(&sc->sc_formats[AUACER_FORMATS_6CH]);
	if (AC97_IS_FIXED_RATE(sc->codec_if)) {
		/* Fixed-rate codec: force every format to 48kHz only. */
		for (i = 0; i < AUACER_NFORMATS; i++) {
			sc->sc_formats[i].frequency_type = 1;
			sc->sc_formats[i].frequency[0] = 48000;
		}
	}

	if (0 != auconv_create_encodings(sc->sc_formats, AUACER_NFORMATS,
	    &sc->sc_encodings)) {
		return;
	}

	audio_attach_mi(&auacer_hw_if, sc, &sc->sc_dev);

	auacer_reset(sc);

	if (!pmf_device_register(self, NULL, auacer_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
/*
 * yds_attach:
 *
 *	Attach a Yamaha DS-1 family audio controller: map its memory
 *	space, establish the interrupt, disable legacy mode, mute all
 *	hardware volumes and defer the remaining setup (which needs the
 *	root filesystem for firmware) to yds_attachhook().
 */
void
yds_attach(struct device *parent, struct device *self, void *aux)
{
	struct yds_softc *sc = (struct yds_softc *)self;
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	char const *intrstr;
	pci_intr_handle_t ih;
	bus_size_t size;
	pcireg_t reg;
	int i;

	/* Map register to memory */
	if (pci_mapreg_map(pa, YDS_PCI_MBA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->memt, &sc->memh, NULL, &size, 0)) {
		printf(": can't map mem space\n");
		return;
	}

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		bus_space_unmap(sc->memt, sc->memh, size);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_AUDIO | IPL_MPSAFE,
	    yds_intr, sc, self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->memt, sc->memh, size);
		return;
	}
	printf(": %s\n", intrstr);

	sc->sc_dmatag = pa->pa_dmat;
	sc->sc_pc = pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_id = pa->pa_id;
	sc->sc_revision = PCI_REVISION(pa->pa_class);
	sc->sc_flags = yds_get_dstype(sc->sc_id);
	/*
	 * NOTE(review): this tests cf_flags against the capability bit
	 * value itself rather than a config-flag bit like 0x1 —
	 * verify against other yds(4) sources that this is intended.
	 */
	if (sc->sc_dev.dv_cfdata->cf_flags & YDS_CAP_LEGACY_SMOD_DISABLE)
		sc->sc_flags |= YDS_CAP_LEGACY_SMOD_DISABLE;
#ifdef AUDIO_DEBUG
	if (ydsdebug)
		/*
		 * NOTE(review): kernel %b conventionally takes the value
		 * before the bit-description string — confirm the
		 * YDS_CAP_BITS / sc_flags argument order here.
		 */
		printf("%s: chip has %b\n", sc->sc_dev.dv_xname,
		    YDS_CAP_BITS, sc->sc_flags);
#endif

	/* Disable legacy mode */
	reg = pci_conf_read(pc, pa->pa_tag, YDS_PCI_LEGACY);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_LEGACY,
	    reg & YDS_PCI_LEGACY_LAD);

	/* Mute all volumes */
	for (i = 0x80; i < 0xc0; i += 2)
		YWRITE2(sc, i, 0);

	sc->sc_legacy_iot = pa->pa_iot;
	/* Finish attachment once the root filesystem is mounted. */
	mountroothook_establish(yds_attachhook, sc);
}
/*
 * acer_setup_channel:
 *
 *	Program PIO/DMA/UDMA timing and FIFO configuration for both
 *	drives on one channel of an Acer Labs (ALi) IDE controller.
 *	The FIFO/UDMA configuration is accumulated in acer_fifo_udma
 *	and written back in a single pci_conf_write() at the end.
 */
static void
acer_setup_channel(struct ata_channel *chp)
{
	struct ata_drive_datas *drvp;
	int drive, s;
	u_int32_t acer_fifo_udma;
	u_int32_t idedma_ctl;
	struct pciide_channel *cp = (struct pciide_channel*)chp;
	struct pciide_softc *sc = CHAN_TO_PCIIDE(chp);

	idedma_ctl = 0;
	acer_fifo_udma = pci_conf_read(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA);
	ATADEBUG_PRINT(("acer_setup_channel: old fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);

	/* setup DMA if needed */
	pciide_channel_dma_setup(cp);

	if ((chp->ch_drive[0].drive_flags | chp->ch_drive[1].drive_flags) &
	    ATA_DRIVE_UDMA) {
		/* check 80 pins cable */
		/* Without an 80-wire cable, cap both drives at UDMA2. */
		if (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4A) &
		    ACER_0x4A_80PIN(chp->ch_channel)) {
			if (chp->ch_drive[0].UDMA_mode > 2)
				chp->ch_drive[0].UDMA_mode = 2;
			if (chp->ch_drive[1].UDMA_mode > 2)
				chp->ch_drive[1].UDMA_mode = 2;
		}
	}

	for (drive = 0; drive < 2; drive++) {
		drvp = &chp->ch_drive[drive];
		/* If no drive, skip */
		if (drvp->drive_type == ATA_DRIVET_NONE)
			continue;
		ATADEBUG_PRINT(("acer_setup_channel: old timings reg for "
		    "channel %d drive %d 0x%x\n", chp->ch_channel, drive,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive))), DEBUG_PROBE);
		/* clear FIFO/DMA mode */
		acer_fifo_udma &= ~(ACER_FTH_OPL(chp->ch_channel, drive, 0x3) |
		    ACER_UDMA_EN(chp->ch_channel, drive) |
		    ACER_UDMA_TIM(chp->ch_channel, drive, 0x7));

		/* add timing values, setup DMA if needed */
		if ((drvp->drive_flags & ATA_DRIVE_DMA) == 0 &&
		    (drvp->drive_flags & ATA_DRIVE_UDMA) == 0) {
			/* PIO-only drive: minimal FIFO threshold. */
			acer_fifo_udma |=
			    ACER_FTH_OPL(chp->ch_channel, drive, 0x1);
			goto pio;
		}

		acer_fifo_udma |= ACER_FTH_OPL(chp->ch_channel, drive, 0x2);
		if (drvp->drive_flags & ATA_DRIVE_UDMA) {
			/* use Ultra/DMA */
			/* UDMA supersedes multiword DMA for this drive. */
			s = splbio();
			drvp->drive_flags &= ~ATA_DRIVE_DMA;
			splx(s);
			acer_fifo_udma |=
			    ACER_UDMA_EN(chp->ch_channel, drive);
			acer_fifo_udma |=
			    ACER_UDMA_TIM(chp->ch_channel, drive,
				acer_udma[drvp->UDMA_mode]);
			/* XXX disable if one drive < UDMA3 ? */
			if (drvp->UDMA_mode >= 3) {
				pciide_pci_write(sc->sc_pc, sc->sc_tag,
				    ACER_0x4B,
				    pciide_pci_read(sc->sc_pc, sc->sc_tag,
					ACER_0x4B) | ACER_0x4B_UDMA66);
			}
		} else {
			/*
			 * use Multiword DMA
			 * Timings will be used for both PIO and DMA,
			 * so adjust DMA mode if needed
			 */
			if (drvp->PIO_mode > (drvp->DMA_mode + 2))
				drvp->PIO_mode = drvp->DMA_mode + 2;
			if (drvp->DMA_mode + 2 > (drvp->PIO_mode))
				drvp->DMA_mode = (drvp->PIO_mode > 2) ?
				    drvp->PIO_mode - 2 : 0;
			if (drvp->DMA_mode == 0)
				drvp->PIO_mode = 0;
		}
		idedma_ctl |= IDEDMA_CTL_DRV_DMA(drive);
pio:		pciide_pci_write(sc->sc_pc, sc->sc_tag,
		    ACER_IDETIM(chp->ch_channel, drive),
		    acer_pio[drvp->PIO_mode]);
	}
	ATADEBUG_PRINT(("acer_setup_channel: new fifo/udma reg 0x%x\n",
	    acer_fifo_udma), DEBUG_PROBE);
	pci_conf_write(sc->sc_pc, sc->sc_tag, ACER_FTH_UDMA, acer_fifo_udma);
	if (idedma_ctl != 0) {
		/* Add software bits in status register */
		bus_space_write_1(sc->sc_dma_iot, cp->dma_iohs[IDEDMA_CTL], 0,
		    idedma_ctl);
	}
}
/*
 * epic_pci_attach:
 *
 *	PCI front-end attachment for the SMC EPIC Ethernet controller.
 *	Powers up the chip, maps its registers (memory space preferred
 *	over I/O space), enables bus mastering, establishes the
 *	interrupt and hands off to the bus-independent epic_attach().
 */
static void
epic_pci_attach(device_t parent, device_t self, void *aux)
{
	struct epic_pci_softc *psc = device_private(self);
	struct epic_softc *sc = &psc->sc_epic;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t intrh;
	const char *istr = NULL;
	const struct epic_pci_product *product;
	const struct epic_pci_subsys_info *subsys;
	bus_space_tag_t io_tag, mem_tag;
	bus_space_handle_t io_hdl, mem_hdl;
	int have_io, have_mem;
	int err;

	sc->sc_dev = self;

	product = epic_pci_lookup(pa);
	if (product == NULL) {
		/* The match routine said yes, so this cannot happen. */
		aprint_normal("\n");
		panic("%s: impossible", __func__);
	}

	pci_aprint_devinfo_fancy(pa, "Ethernet controller",
	    product->epp_name, 1);

	/* Bring the chip to full power before touching any registers. */
	err = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL);
	if (err && err != EOPNOTSUPP) {
		aprint_error_dev(self, "cannot activate %d\n", err);
		return;
	}

	/*
	 * Try both register windows; prefer memory space when it maps.
	 */
	have_io = (pci_mapreg_map(pa, EPIC_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &io_tag, &io_hdl, NULL, NULL) == 0);
	have_mem = (pci_mapreg_map(pa, EPIC_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &mem_tag, &mem_hdl, NULL, NULL) == 0);

	if (have_mem) {
		sc->sc_st = mem_tag;
		sc->sc_sh = mem_hdl;
	} else if (have_io) {
		sc->sc_st = io_tag;
		sc->sc_sh = io_hdl;
	} else {
		aprint_error_dev(self, "unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Make sure bus mastering is enabled. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &intrh)) {
		aprint_error_dev(self, "unable to map interrupt\n");
		return;
	}
	istr = pci_intr_string(pc, intrh);
	psc->sc_ih = pci_intr_establish(pc, intrh, IPL_NET, epic_intr, sc);
	if (psc->sc_ih == NULL) {
		aprint_error_dev(self, "unable to establish interrupt");
		if (istr != NULL)
			aprint_error(" at %s", istr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", istr);

	/* Some subsystems need extra quirk flags. */
	subsys = epic_pci_subsys_lookup(pa);
	if (subsys != NULL)
		sc->sc_hwflags = subsys->flags;

	/*
	 * Finish off the attach.
	 */
	epic_attach(sc);
}
/*
 * auvia_attach:
 *
 *	Attach a VIA VT82C686A/VT8233 AC'97 audio controller: map I/O
 *	space, establish the interrupt, configure the AC-link in PCI
 *	config space, attach the codec, unmute the standard ports and
 *	register with the audio layer.
 */
void
auvia_attach(struct device *parent, struct device *self, void *aux)
{
	struct pci_attach_args *pa = aux;
	struct auvia_softc *sc = (struct auvia_softc *) self;
	const char *intrstr = NULL;
	struct mixer_ctrl ctl;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t pt = pa->pa_tag;
	pci_intr_handle_t ih;
	bus_size_t iosize;
	pcireg_t pr;
	int r, i;

	/* The VT8233 uses different play/record register blocks. */
	sc->sc_play.sc_base = AUVIA_PLAY_BASE;
	sc->sc_record.sc_base = AUVIA_RECORD_BASE;
	if (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_VIATECH_VT8233_AC97) {
		sc->sc_flags |= AUVIA_FLAGS_VT8233;
		sc->sc_play.sc_base = VIA8233_MP_BASE;
		sc->sc_record.sc_base = VIA8233_WR_BASE;
	}

	if (pci_mapreg_map(pa, 0x10, PCI_MAPREG_TYPE_IO, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, &iosize, 0)) {
		printf(": can't map i/o space\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pc;
	sc->sc_pt = pt;

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
		return;
	}
	intrstr = pci_intr_string(pc, ih);

	sc->sc_ih = pci_intr_establish(pc, ih, IPL_AUDIO, auvia_intr, sc,
	    sc->sc_dev.dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
		return;
	}

	printf(": %s\n", intrstr);

	/* disable SBPro compat & others */
	pr = pci_conf_read(pc, pt, AUVIA_PCICONF_JUNK);

	pr &= ~AUVIA_PCICONF_ENABLES; /* clear compat function enables */
	/* XXX what to do about MIDI, FM, joystick? */

	/* Enable the AC-link, variable sample rate and SGD engines. */
	pr |= (AUVIA_PCICONF_ACLINKENAB | AUVIA_PCICONF_ACNOTRST |
	    AUVIA_PCICONF_ACVSR | AUVIA_PCICONF_ACSGD);

	pr &= ~(AUVIA_PCICONF_ACFM | AUVIA_PCICONF_ACSB);

	pci_conf_write(pc, pt, AUVIA_PCICONF_JUNK, pr);
	sc->sc_pci_junk = pr;

	/* Hook up the AC'97 host interface callbacks. */
	sc->host_if.arg = sc;
	sc->host_if.attach = auvia_attach_codec;
	sc->host_if.read = auvia_read_codec;
	sc->host_if.write = auvia_write_codec;
	sc->host_if.reset = auvia_reset_codec;
	sc->host_if.spdif_event = auvia_spdif_event;

	if ((r = ac97_attach(&sc->host_if)) != 0) {
		printf("%s: can't attach codec (error 0x%X)\n",
		    sc->sc_dev.dv_xname, r);
		pci_intr_disestablish(pc, sc->sc_ih);
		bus_space_unmap(sc->sc_iot, sc->sc_ioh, iosize);
		return;
	}

	/* disable mutes */
	for (i = 0; i < 4; i++) {
		static struct {
			char *class, *device;
		} d[] = {
			{ AudioCoutputs, AudioNmaster},
			{ AudioCinputs, AudioNdac},
			{ AudioCinputs, AudioNcd},
			{ AudioCrecord, AudioNvolume},
		};

		ctl.type = AUDIO_MIXER_ENUM;
		ctl.un.ord = 0;

		ctl.dev = sc->codec_if->vtbl->get_portnum_by_name(sc->codec_if,
		    d[i].class, d[i].device, AudioNmute);
		auvia_set_port(sc, &ctl);
	}

	/* set a reasonable default volume */
	ctl.type = AUDIO_MIXER_VALUE;
	ctl.un.value.num_channels = 2;
	ctl.un.value.level[AUDIO_MIXER_LEVEL_LEFT] = \
	ctl.un.value.level[AUDIO_MIXER_LEVEL_RIGHT] = 199;

	ctl.dev = sc->codec_if->vtbl->get_portnum_by_name(sc->codec_if,
	    AudioCoutputs, AudioNmaster, NULL);
	auvia_set_port(sc, &ctl);

	audio_attach_mi(&auvia_hw_if, sc, &sc->sc_dev);
	sc->codec_if->vtbl->unlock(sc->codec_if);
}
/*
 * pciaddr_resource_manage:
 *
 *	Walk all base address registers of the device at `tag', size
 *	each BAR using the standard write-ones/read-mask protocol, and
 *	invoke `func' for every implemented region so the caller can
 *	reserve or allocate address space for it.  Bogus devices are
 *	counted in sc->nbogus.
 */
void
pciaddr_resource_manage(struct shpcic_softc *sc, pci_chipset_tag_t pc,
    pcitag_t tag, pciaddr_resource_manage_func_t func)
{
	struct extent *ex;
	pcireg_t val, mask;
	bus_addr_t addr;
	bus_size_t size;
	int error, mapreg, type, reg_start, reg_end, width;

	/* The number of BARs depends on the header type. */
	val = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(val)) {
	default:
		printf("WARNING: unknown PCI device header.\n");
		sc->nbogus++;
		return;
	case 0:	/* normal device */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		break;
	case 1: /* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		break;
	case 2: /* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		break;
	}
	error = 0;

	for (mapreg = reg_start; mapreg < reg_end; mapreg += width) {
		/* inquire PCI device bus space requirement */
		/* Write all-ones, read back the size mask, then restore. */
		val = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, ~0);
		mask = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, val);

		type = PCI_MAPREG_TYPE(val);
		width = 4;
		if (type == PCI_MAPREG_TYPE_MEM) {
			if (PCI_MAPREG_MEM_TYPE(val) ==
			    PCI_MAPREG_MEM_TYPE_64BIT) {
				/* XXX We could examine the upper 32 bits
				 * XXX of the BAR here, but we are totally
				 * XXX unprepared to handle a non-zero value,
				 * XXX either here or anywhere else in
				 * XXX i386-land.
				 * XXX So just arrange to not look at the
				 * XXX upper 32 bits, lest we misinterpret
				 * XXX it as a 32-bit BAR set to zero.
				 */
				width = 8;
			}
			addr = PCI_MAPREG_MEM_ADDR(val);
			size = PCI_MAPREG_MEM_SIZE(mask);
			ex = sc->extent_mem;
			/* XXX */
			/*
			 * sh-IPL allocates a low address for PCI memory
			 * on px-eh systems, clobber it so it gets 'remapped'
			 */
			if (addr != 0 &&
			    addr < sc->sc_membus_space.bus_base) {
				val = 0;
				pci_conf_write(pc, tag, mapreg, val);
			}
		} else {
			/* XXX some devices give 32bit value */
			if (sc->sc_iobus_space.bus_base !=
			    PCIADDR_PORT_START) {
				/*
				 * if the bus base is not 0 skew all addresses
				 */
				val &= PCIADDR_PORT_END;
				val |= sc->sc_iobus_space.bus_base;
				pci_conf_write(pc, tag, mapreg, val);
			}
			addr = PCI_MAPREG_IO_ADDR(val);
			size = PCI_MAPREG_IO_SIZE(mask);
			ex = sc->extent_port;
		}

		if (!size) /* unused register */
			continue;

		/* reservation/allocation phase */
		error += (*func) (sc, pc, tag, mapreg, ex, type, &addr, size);

		PCIBIOS_PRINTV(("\t%02xh %s 0x%08x 0x%08x\n",
		    mapreg, type ? "port" : "mem ",
		    (unsigned int)addr, (unsigned int)size));
	}

	if (error)
		sc->nbogus++;

	PCIBIOS_PRINTV(("\t\t[%s]\n", error ? "NG" : "OK"));
}
/*
 * ste_attach:
 *
 *	Attach an ste(4) Ethernet controller: map its registers,
 *	allocate and load the DMA control-data structures, reset the
 *	chip, read the MAC address from the EEPROM, probe the MII and
 *	attach the network interface.  DMA resources are torn down in
 *	reverse order through the fail_* goto chain on error.
 */
static void
ste_attach(device_t parent, device_t self, void *aux)
{
	struct ste_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	bus_space_tag_t iot, memt;
	bus_space_handle_t ioh, memh;
	bus_dma_segment_t seg;
	int ioh_valid, memh_valid;
	int i, rseg, error;
	const struct ste_product *sp;
	uint8_t enaddr[ETHER_ADDR_LEN];
	uint16_t myea[ETHER_ADDR_LEN / 2];

	callout_init(&sc->sc_tick_ch, 0);

	sp = ste_lookup(pa);
	if (sp == NULL) {
		/* Match said yes, so a lookup failure is a driver bug. */
		printf("\n");
		panic("ste_attach: impossible");
	}

	printf(": %s\n", sp->ste_name);

	/*
	 * Map the device.  Memory space is preferred over I/O space.
	 */
	ioh_valid = (pci_mapreg_map(pa, STE_PCI_IOBA,
	    PCI_MAPREG_TYPE_IO, 0,
	    &iot, &ioh, NULL, NULL) == 0);
	memh_valid = (pci_mapreg_map(pa, STE_PCI_MMBA,
	    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
	    &memt, &memh, NULL, NULL) == 0);

	if (memh_valid) {
		sc->sc_st = memt;
		sc->sc_sh = memh;
	} else if (ioh_valid) {
		sc->sc_st = iot;
		sc->sc_sh = ioh;
	} else {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map device registers\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	/* Enable bus mastering. */
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG) |
	    PCI_COMMAND_MASTER_ENABLE);

	/* power up chip */
	if ((error = pci_activate(pa->pa_pc, pa->pa_tag, self, NULL)) &&
	    error != EOPNOTSUPP) {
		aprint_error_dev(&sc->sc_dev, "cannot activate %d\n",
		    error);
		return;
	}

	/*
	 * Map and establish our interrupt.
	 */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(&sc->sc_dev, "unable to map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ste_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}
	printf("%s: interrupting at %s\n", device_xname(&sc->sc_dev),
	    intrstr);

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ste_control_data), PAGE_SIZE, 0, &seg, 1, &rseg,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to allocate control data, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg,
	    sizeof(struct ste_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to map control data, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ste_control_data), 1,
	    sizeof(struct ste_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to create control data DMA map, "
		    "error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ste_control_data), NULL,
	    0)) != 0) {
		aprint_error_dev(&sc->sc_dev,
		    "unable to load control data DMA map, error = %d\n",
		    error);
		goto fail_3;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < STE_NTXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    STE_NTXFRAGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to create tx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_4;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < STE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].ds_dmamap)) != 0) {
			aprint_error_dev(&sc->sc_dev,
			    "unable to create rx DMA map %d, "
			    "error = %d\n", i, error);
			goto fail_5;
		}
		sc->sc_rxsoft[i].ds_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ste_reset(sc, AC_GlobalReset | AC_RxReset | AC_TxReset | AC_DMA |
	    AC_FIFO | AC_Network | AC_Host | AC_AutoInit | AC_RstOut);

	/*
	 * Read the Ethernet address from the EEPROM.
	 */
	for (i = 0; i < 3; i++) {
		ste_read_eeprom(sc, STE_EEPROM_StationAddress0 + i,
		    &myea[i]);
		/* EEPROM words are little-endian. */
		myea[i] = le16toh(myea[i]);
	}
	memcpy(enaddr, myea, sizeof(enaddr));

	printf("%s: Ethernet address %s\n", device_xname(&sc->sc_dev),
	    ether_sprintf(enaddr));

	/*
	 * Initialize our media structures and probe the MII.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ste_mii_readreg;
	sc->sc_mii.mii_writereg = ste_mii_writereg;
	sc->sc_mii.mii_statchg = ste_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(&sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		/* No PHY found: only offer a "none" media option. */
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	ifp = &sc->sc_ethercom.ec_if;
	strlcpy(ifp->if_xname, device_xname(&sc->sc_dev), IFNAMSIZ);
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = ste_ioctl;
	ifp->if_start = ste_start;
	ifp->if_watchdog = ste_watchdog;
	ifp->if_init = ste_init;
	ifp->if_stop = ste_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Default the transmit threshold to 128 bytes.
	 */
	sc->sc_txthresh = 128;

	/*
	 * Disable MWI if the PCI layer tells us to.
	 */
	sc->sc_DMACtrl = 0;
	if ((pa->pa_flags & PCI_FLAGS_MWI_OKAY) == 0)
		sc->sc_DMACtrl |= DC_MWIDisable;

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ste_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(&sc->sc_dev));

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_5:
	for (i = 0; i < STE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].ds_dmamap);
	}
 fail_4:
	for (i = 0; i < STE_NTXDESC; i++) {
		if (sc->sc_txsoft[i].ds_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].ds_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ste_control_data));
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &seg, rseg);
 fail_0:
	return;
}
/*
 * coram_attach --
 *	Autoconfiguration attach for the Conexant CX23885-based capture
 *	card: identify the board from the PCI subsystem ID, map the MMIO
 *	register BAR, hook up the interrupt, enable bus mastering, set up
 *	the on-chip I2C controllers, pulse the (HVR1250) GPIO reset line,
 *	and open the demodulator/tuner before attaching the MPEG engine.
 */
static void
coram_attach(device_t parent, device_t self, void *aux)
{
	struct coram_softc *sc = device_private(self);
	const struct pci_attach_args *pa = aux;
	pci_intr_handle_t ih;
	pcireg_t reg;
	const char *intrstr;
	struct coram_iic_softc *cic;
	uint32_t value;
	int i;
#ifdef CORAM_ATTACH_I2C
	struct i2cbus_attach_args iba;
#endif

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, NULL);

	/* Board type is keyed on the subsystem vendor/product IDs. */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	sc->sc_board = coram_board_lookup(PCI_VENDOR(reg), PCI_PRODUCT(reg));
	KASSERT(sc->sc_board != NULL);

	if (pci_mapreg_map(pa, CX23885_MMBASE, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->sc_memt, &sc->sc_memh, NULL, &sc->sc_mems)) {
		aprint_error_dev(self, "couldn't map memory space\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;
	sc->sc_pc = pa->pa_pc;

	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pa->pa_pc, ih);
	/* Handler gets the device_t (self), not the softc. */
	sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_VM, coram_intr,
	    self);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* set master */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);

	/*
	 * I2C: each of the I2C_NUM controllers lives at a fixed-size
	 * subregion of the chip's register window.
	 */
	for(i = 0; i < I2C_NUM; i++) {
		cic = &sc->sc_iic[i];
		cic->cic_sc = sc;
		if (bus_space_subregion(sc->sc_memt, sc->sc_memh,
		    I2C_BASE + (I2C_SIZE * i), I2C_SIZE, &cic->cic_regh))
			panic("failed to subregion i2c");

		mutex_init(&cic->cic_busmutex, MUTEX_DRIVER, IPL_NONE);
		cic->cic_i2c.ic_cookie = cic;
		cic->cic_i2c.ic_acquire_bus = coram_iic_acquire_bus;
		cic->cic_i2c.ic_release_bus = coram_iic_release_bus;
		cic->cic_i2c.ic_exec = coram_iic_exec;

#ifdef CORAM_ATTACH_I2C
		/* attach iic(4) */
		memset(&iba, 0, sizeof(iba));
		iba.iba_tag = &cic->cic_i2c;
		iba.iba_type = I2C_TYPE_SMBUS;
		cic->cic_i2cdev = config_found_ia(self, "i2cbus", &iba,
		    iicbus_print);
#endif
	}

	/*
	 * HVR1250 GPIO: pulse bits 0/16 of register 0x110010 low then
	 * high (reset of the attached frontend, presumably — the exact
	 * pin meaning is board-specific; confirm against CX23885 docs).
	 */
	value = bus_space_read_4(sc->sc_memt, sc->sc_memh, 0x110010);
#if 1
	value &= ~0x00010001;
	bus_space_write_4(sc->sc_memt, sc->sc_memh, 0x110010, value);
	delay(5000);
#endif
	value |= 0x00010001;
	bus_space_write_4(sc->sc_memt, sc->sc_memh, 0x110010, value);

#if 0
	/* Debug-only EEPROM dump; note this references sc->sc_i2c,
	 * which differs from the sc_iic[] array used above. */
	int i;
	uint8_t foo[256];
	uint8_t bar;
	bar = 0;
//	seeprom_bootstrap_read(&sc->sc_i2c, 0x50, 0, 256, foo, 256);
	iic_acquire_bus(&sc->sc_i2c, I2C_F_POLL);
	iic_exec(&sc->sc_i2c, I2C_OP_READ_WITH_STOP, 0x50, &bar, 1, foo,
	    256, I2C_F_POLL);
	iic_release_bus(&sc->sc_i2c, I2C_F_POLL);
	printf("\n");
	for ( i = 0; i < 256; i++) {
		if ( (i % 8) == 0 )
			printf("%02x: ", i);
		printf("%02x", foo[i]);
		if ( (i % 8) == 7 )
			printf("\n");
		else
			printf(" ");
	}
	printf("\n");
#endif

	/* Demod and tuner sit on the first I2C bus at fixed addresses. */
	sc->sc_demod = cx24227_open(sc->sc_dev, &sc->sc_iic[0].cic_i2c, 0x19);
	if (sc->sc_demod == NULL)
		aprint_error_dev(self, "couldn't open cx24227\n");
	sc->sc_tuner = mt2131_open(sc->sc_dev, &sc->sc_iic[0].cic_i2c, 0x61);
	if (sc->sc_tuner == NULL)
		aprint_error_dev(self, "couldn't open mt2131\n");

	coram_mpeg_attach(sc);

	if (!pmf_device_register(self, NULL, coram_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");

	return;
}
void gdium_attach_hook(device_t parent, device_t self, struct pcibus_attach_args *pba) { pci_chipset_tag_t pc = pba->pba_pc; pcireg_t id; pcitag_t tag; #ifdef notyet int bar; #endif #if 0 pcireg_t reg; int dev, func; #endif if (pba->pba_bus != 0) return; #ifdef notyet /* * Clear all BAR of the mini PCI slot; PMON did not initialize * it, and we do not want it to conflict with anything. */ tag = pci_make_tag(pc, 0, 13, 0); for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END; bar += 4) pci_conf_write(pc, tag, bar, 0); #else /* * Force a non conflicting BAR for the wireless controller, * until proper resource configuration code is added to * bonito (work in progress). */ tag = pci_make_tag(pc, 0, 13, 0); pci_conf_write(pc, tag, PCI_MAPREG_START, 0x06228000); #endif /* * Figure out which motherboard we are running on. * Might not be good enough... */ tag = pci_make_tag(pc, 0, 17, 0); id = pci_conf_read(pc, tag, PCI_ID_REG); if (id == PCI_ID_CODE(PCI_VENDOR_NEC, PCI_PRODUCT_NEC_USB)) gdium_revision = 1; #if 0 /* * Tweak the usb controller capabilities. */ for (dev = pci_bus_maxdevs(pc, 0); dev >= 0; dev--) { tag = pci_make_tag(pc, 0, dev, 0); id = pci_conf_read(pc, tag, PCI_ID_REG); if (id != PCI_ID_CODE(PCI_VENDOR_NEC, PCI_PRODUCT_NEC_USB)) continue; if (gdium_revision != 0) { reg = pci_conf_read(pc, tag, 0xe0); /* enable ports 1 and 2 */ reg |= 0x00000003; pci_conf_write(pc, tag, 0xe0, reg); } else { for (func = 0; func < 2; func++) { tag = pci_make_tag(pc, 0, dev, func); id = pci_conf_read(pc, tag, PCI_ID_REG); if (PCI_VENDOR(id) != PCI_VENDOR_NEC) continue; if (PCI_PRODUCT(id) != PCI_PRODUCT_NEC_USB && PCI_PRODUCT(id) != PCI_PRODUCT_NEC_USB2) continue; reg = pci_conf_read(pc, tag, 0xe0); /* enable ports 1 and 3, disable port 2 */ reg &= ~0x00000007; reg |= 0x00000005; pci_conf_write(pc, tag, 0xe0, reg); pci_conf_write(pc, tag, 0xe4, 0x00000020); } } } #endif }
/*
 * query_bus --
 *	Create and probe the pciconf_bus_t behind a PCI-PCI bridge
 *	device `pd' at device number `dev' on `parent'.  Allocates a
 *	secondary bus number range, programs the bridge's bus-number
 *	register, recursively probes the secondary bus, and then folds
 *	the child's aggregate I/O / MEM / prefetchable-MEM requirements
 *	into windows on the parent bus.
 *
 *	Returns the new bus on success, NULL if probing failed or the
 *	parent has no free windows.  Panics if memory cannot be
 *	allocated.
 */
static pciconf_bus_t *
query_bus(pciconf_bus_t *parent, pciconf_dev_t *pd, int dev)
{
	pciconf_bus_t	*pb;
	pcireg_t	busreg, io, pmem;
	pciconf_win_t	*pi, *pm;

	pb = malloc (sizeof (pciconf_bus_t), M_DEVBUF, M_NOWAIT);
	if (!pb)
		panic("Unable to allocate memory for PCI configuration.");

	pb->cacheline_size = parent->cacheline_size;
	pb->parent_bus = parent;
	alloc_busno(parent, pb);
	if (pci_conf_debug)
		printf("PCI bus bridge covers busses %d-%d\n",
		    pb->busno, pb->last_busno);

	/* Program primary/secondary/subordinate bus numbers. */
	busreg = parent->busno << PCI_BRIDGE_BUS_PRIMARY_SHIFT;
	busreg |= pb->busno << PCI_BRIDGE_BUS_SECONDARY_SHIFT;
	busreg |= pb->last_busno << PCI_BRIDGE_BUS_SUBORDINATE_SHIFT;
	pci_conf_write(parent->pc, pd->tag, PCI_BRIDGE_BUS_REG, busreg);

	pb->swiz = parent->swiz + dev;
	pb->ioext = NULL;
	pb->memext = NULL;
	pb->pmemext = NULL;
	pb->pc = parent->pc;
	pb->io_total = pb->mem_total = pb->pmem_total = 0;

	/* 32-bit I/O forwarding only if both parent and bridge support it. */
	pb->io_32bit = 0;
	if (parent->io_32bit) {
		io = pci_conf_read(parent->pc, pd->tag,
		    PCI_BRIDGE_STATIO_REG);
		if (PCI_BRIDGE_IO_32BITS(io)) {
			pb->io_32bit = 1;
		}
	}

	/* Likewise for 64-bit prefetchable memory. */
	pb->pmem_64bit = 0;
	if (parent->pmem_64bit) {
		pmem = pci_conf_read(parent->pc, pd->tag,
		    PCI_BRIDGE_PREFETCHMEM_REG);
		if (PCI_BRIDGE_PREFETCHMEM_64BITS(pmem)) {
			pb->pmem_64bit = 1;
		}
	}

	/* Recursively scan the secondary bus (fills in *_total). */
	if (probe_bus(pb)) {
		printf("Failed to probe bus %d\n", pb->busno);
		goto err;
	}

	/* Convert the child's requirements into parent windows. */
	if (pb->io_total > 0) {
		if (parent->niowin >= MAX_CONF_IO) {
			printf("pciconf: too many I/O windows\n");
			goto err;
		}
		pb->io_total |= 0xfff;		/* Round up */
		pi = get_io_desc(parent, pb->io_total);
		pi->dev = pd;
		pi->reg = 0;
		pi->size = pb->io_total;
		pi->align = 0x1000;		/* 4K alignment */
		pi->prefetch = 0;
		parent->niowin++;
		parent->io_total += pb->io_total;
	}

	if (pb->mem_total > 0) {
		if (parent->nmemwin >= MAX_CONF_MEM) {
			printf("pciconf: too many MEM windows\n");
			goto err;
		}
		pb->mem_total |= 0xfffff;	/* Round up */
		pm = get_mem_desc(parent, pb->mem_total);
		pm->dev = pd;
		pm->reg = 0;
		pm->size = pb->mem_total;
		pm->align = 0x100000;		/* 1M alignment */
		pm->prefetch = 0;
		parent->nmemwin++;
		parent->mem_total += pb->mem_total;
	}

	if (pb->pmem_total > 0) {
		if (parent->nmemwin >= MAX_CONF_MEM) {
			printf("pciconf: too many MEM windows\n");
			goto err;
		}
		pb->pmem_total |= 0xfffff;	/* Round up */
		pm = get_mem_desc(parent, pb->pmem_total);
		pm->dev = pd;
		pm->reg = 0;
		pm->size = pb->pmem_total;
		pm->align = 0x100000;		/* 1M alignment */
		pm->prefetch = 1;
		parent->nmemwin++;
		parent->pmem_total += pb->pmem_total;
	}

	return pb;
err:
	free(pb, M_DEVBUF);
	return NULL;
}
/*
 * Set up bus common stuff, then loop over devices & functions.
 * If we find something, call pci_do_device_query()).
 */
static int
probe_bus(pciconf_bus_t *pb)
{
	int device, maxdevs;
#ifdef __PCI_BUS_DEVORDER
	char devs[32];
	int  i;
#endif

	maxdevs = pci_bus_maxdevs(pb->pc, pb->busno);
	pb->ndevs = 0;
	pb->niowin = 0;
	pb->nmemwin = 0;
	/* Assume best-case bus capabilities; device scan may clear them. */
	pb->freq_66 = 1;
	pb->fast_b2b = 1;
	pb->prefetch = 1;
	pb->max_mingnt = 0;	/* we are looking for the maximum */
	pb->min_maxlat = 0x100;	/* we are looking for the minimum */
	pb->bandwidth_used = 0;

#ifdef __PCI_BUS_DEVORDER
	/* Platform dictates the order devices must be probed in. */
	pci_bus_devorder(pb->pc, pb->busno, devs);
	for (i=0; (device=devs[i]) < 32 && device >= 0; i++) {
#else
	for (device=0; device < maxdevs; device++) {
#endif
		pcitag_t tag;
		pcireg_t id, bhlcr;
		int function, nfunction;
		int confmode;

		tag = pci_make_tag(pb->pc, pb->busno, device, 0);
		if (pci_conf_debug) {
			print_tag(pb->pc, tag);
		}
		id = pci_conf_read(pb->pc, tag, PCI_ID_REG);

		if (pci_conf_debug) {
			printf("id=%x: Vendor=%x, Product=%x\n",
			    id, PCI_VENDOR(id),PCI_PRODUCT(id));
		}
		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;

		/* Multi-function bit decides how many functions to scan. */
		bhlcr = pci_conf_read(pb->pc, tag, PCI_BHLC_REG);
		nfunction = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
		for (function = 0 ; function < nfunction ; function++) {
			tag = pci_make_tag(pb->pc, pb->busno, device,
			    function);
			id = pci_conf_read(pb->pc, tag, PCI_ID_REG);
			if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
				continue;
			if (pb->ndevs+1 < MAX_CONF_DEV) {
				if (pci_conf_debug) {
					print_tag(pb->pc, tag);
					printf("Found dev 0x%04x 0x%04x -- "
					    "really probing.\n",
					    PCI_VENDOR(id), PCI_PRODUCT(id));
				}
#ifdef __HAVE_PCI_CONF_HOOK
				/* Let the platform veto/limit this device. */
				confmode = pci_conf_hook(pb->pc, pb->busno,
				    device, function, id);
				if (confmode == 0)
					continue;
#else
				/*
				 * Don't enable expansion ROMS -- some cards
				 * share address decoders between the EXPROM
				 * and PCI memory space, and enabling the ROM
				 * when not needed will cause all sorts of
				 * lossage.
				 */
				confmode = PCI_CONF_ALL & ~PCI_CONF_MAP_ROM;
#endif
				if (pci_do_device_query(pb, tag, device,
				    function, confmode))
					return -1;
				pb->ndevs++;
			}
		}
	}
	return 0;
}

/*
 * alloc_busno --
 *	Carve a secondary bus number range for child bus `pb' out of the
 *	parent's remaining range.  Each nesting level halves the spacing,
 *	so deeply-nested bridges eventually panic when spacing hits zero.
 */
static void
alloc_busno(pciconf_bus_t *parent, pciconf_bus_t *pb)
{
	pb->busno = parent->next_busno;
	if (parent->next_busno + parent->busno_spacing > parent->last_busno)
		panic("Too many PCI busses on bus %d", parent->busno);
	parent->next_busno = parent->next_busno + parent->busno_spacing;
	pb->next_busno = pb->busno+1;
	pb->busno_spacing = parent->busno_spacing >> 1;
	if (!pb->busno_spacing)
		panic("PCI busses nested too deep.");
	pb->last_busno = parent->next_busno - 1;
}
void amdpm_attach(struct device *parent, struct device *self, void *aux) { struct amdpm_softc *sc = (struct amdpm_softc *) self; struct pci_attach_args *pa = aux; struct timeval tv1, tv2; pcireg_t reg; int i; sc->sc_pc = pa->pa_pc; sc->sc_tag = pa->pa_tag; sc->sc_iot = pa->pa_iot; reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG); if ((reg & AMDPM_PMIOEN) == 0) { printf(": PMxx space isn't enabled\n"); return; } reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_PMPTR); if (bus_space_map(sc->sc_iot, AMDPM_PMBASE(reg), AMDPM_PMSIZE, 0, &sc->sc_ioh)) { printf(": failed to map PMxx space\n"); return; } reg = pci_conf_read(pa->pa_pc, pa->pa_tag, AMDPM_CONFREG); if (reg & AMDPM_RNGEN) { /* Check to see if we can read data from the RNG. */ (void) bus_space_read_4(sc->sc_iot, sc->sc_ioh, AMDPM_RNGDATA); /* benchmark the RNG */ microtime(&tv1); for (i = 2 * 1024; i--; ) { while(!(bus_space_read_1(sc->sc_iot, sc->sc_ioh, AMDPM_RNGSTAT) & AMDPM_RNGDONE)) ; (void) bus_space_read_4(sc->sc_iot, sc->sc_ioh, AMDPM_RNGDATA); } microtime(&tv2); timersub(&tv2, &tv1, &tv1); if (tv1.tv_sec) tv1.tv_usec += 1000000 * tv1.tv_sec; printf(": rng active, %dKb/sec", 8 * 1000000 / tv1.tv_usec); #ifdef AMDPM_RND_COUNTERS evcnt_attach_dynamic(&sc->sc_rnd_hits, EVCNT_TYPE_MISC, NULL, sc->sc_dev.dv_xname, "rnd hits"); evcnt_attach_dynamic(&sc->sc_rnd_miss, EVCNT_TYPE_MISC, NULL, sc->sc_dev.dv_xname, "rnd miss"); for (i = 0; i < 256; i++) { evcnt_attach_dynamic(&sc->sc_rnd_data[i], EVCNT_TYPE_MISC, NULL, sc->sc_dev.dv_xname, "rnd data"); } #endif timeout_set(&sc->sc_rnd_ch, amdpm_rnd_callout, sc); amdpm_rnd_callout(sc); } }
/*
 * piixpm_attach --
 *	Attach the Intel PIIX4 (and compatible, incl. ATI SB600/SB800)
 *	power-management/SMBus controller.  Maps the PM I/O space and
 *	attaches the ACPI PM timer on genuine PIIX4 parts, then maps the
 *	SMBus I/O space, chooses interrupt vs. polled operation, and
 *	attaches one i2cbus per hardware SMBus (four on SB800).
 */
static void
piixpm_attach(device_t parent, device_t self, void *aux)
{
	struct piixpm_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct i2cbus_attach_args iba;
	pcireg_t base, conf;
	pcireg_t pmmisc;
	pci_intr_handle_t ih;
	const char *intrstr = NULL;
	int i, numbusses = 1;

	sc->sc_dev = self;
	sc->sc_iot = pa->pa_iot;
	/*
	 * FIX: sc_pm_iot and sc_smb_iot are the tags actually handed to
	 * bus_space_map() below; they were previously never initialized
	 * (only sc_iot was), so both mappings used garbage tags.
	 */
	sc->sc_pm_iot = pa->pa_iot;
	sc->sc_smb_iot = pa->pa_iot;
	sc->sc_id = pa->pa_id;
	sc->sc_pc = pa->pa_pc;
	sc->sc_pcitag = pa->pa_tag;

	pci_aprint_devinfo(pa, NULL);

	if (!pmf_device_register(self, piixpm_suspend, piixpm_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");

	/* Read configuration */
	conf = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_HOSTC);
	DPRINTF(("%s: conf 0x%x\n", device_xname(self), conf));

	/* The PM timer below exists only on the genuine Intel PIIX4. */
	if ((PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL) ||
	    (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INTEL_82371AB_PMC))
		goto nopowermanagement;

	/* check whether I/O access to PM regs is enabled */
	pmmisc = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PMREGMISC);
	if (!(pmmisc & 1))
		goto nopowermanagement;

	/* Map I/O space */
	base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_PM_BASE);
	if (bus_space_map(sc->sc_pm_iot, PCI_MAPREG_IO_ADDR(base),
	    PIIX_PM_SIZE, 0, &sc->sc_pm_ioh)) {
		aprint_error_dev(self,
		    "can't map power management I/O space\n");
		goto nopowermanagement;
	}

	/*
	 * Revision 0 and 1 are PIIX4, 2 is PIIX4E, 3 is PIIX4M.
	 * PIIX4 and PIIX4E have a bug in the timer latch, see Errata #20
	 * in the "Specification update" (document #297738).
	 */
	acpipmtimer_attach(self, sc->sc_pm_iot, sc->sc_pm_ioh,
	    PIIX_PM_PMTMR,
	    (PCI_REVISION(pa->pa_class) < 3) ? ACPIPMT_BADLATCH : 0 );

nopowermanagement:

	/* SB800 rev 0x40+ needs special initialization */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_ATI &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_ATI_SB600_SMB &&
	    PCI_REVISION(pa->pa_class) >= 0x40) {
		if (piixpm_sb800_init(sc) == 0) {
			/* SB800 exposes four SMBus channels. */
			numbusses = 4;
			goto attach_i2c;
		}
		aprint_normal_dev(self, "SMBus disabled\n");
		return;
	}

	if ((conf & PIIX_SMB_HOSTC_HSTEN) == 0) {
		aprint_normal_dev(self, "SMBus disabled\n");
		return;
	}

	/* Map I/O space */
	base = pci_conf_read(pa->pa_pc, pa->pa_tag, PIIX_SMB_BASE) & 0xffff;
	if (bus_space_map(sc->sc_smb_iot, PCI_MAPREG_IO_ADDR(base),
	    PIIX_SMB_SIZE, 0, &sc->sc_smb_ioh)) {
		aprint_error_dev(self, "can't map smbus I/O space\n");
		return;
	}

	/* Default to polling; switch to interrupts if one can be set up. */
	sc->sc_poll = 1;
	aprint_normal_dev(self, "");
	if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_SMI) {
		/* No PCI IRQ */
		aprint_normal("interrupting at SMI, ");
	} else if ((conf & PIIX_SMB_HOSTC_INTMASK) == PIIX_SMB_HOSTC_IRQ) {
		/* Install interrupt handler */
		if (pci_intr_map(pa, &ih) == 0) {
			intrstr = pci_intr_string(pa->pa_pc, ih);
			sc->sc_smb_ih = pci_intr_establish(pa->pa_pc, ih,
			    IPL_BIO, piixpm_intr, sc);
			if (sc->sc_smb_ih != NULL) {
				aprint_normal("interrupting at %s", intrstr);
				sc->sc_poll = 0;
			}
		}
	}
	if (sc->sc_poll)
		aprint_normal("polling");

	aprint_normal("\n");

attach_i2c:
	/* Attach I2C bus */
	mutex_init(&sc->sc_i2c_mutex, MUTEX_DEFAULT, IPL_NONE);

	for (i = 0; i < numbusses; i++) {
		sc->sc_busses[i].sda = i;
		sc->sc_busses[i].softc = sc;
		sc->sc_i2c_tags[i].ic_cookie = &sc->sc_busses[i];
		sc->sc_i2c_tags[i].ic_acquire_bus = piixpm_i2c_acquire_bus;
		sc->sc_i2c_tags[i].ic_release_bus = piixpm_i2c_release_bus;
		sc->sc_i2c_tags[i].ic_exec = piixpm_i2c_exec;

		memset(&iba, 0, sizeof(iba));
		iba.iba_type = I2C_TYPE_SMBUS;
		iba.iba_tag = &sc->sc_i2c_tags[i];
		config_found_ia(self, "i2cbus", &iba, iicbus_print);
	}
}
/*
 * acer_chip_map --
 *	Chip-specific setup for Acer Labs (ALi) M5229/aceride IDE
 *	controllers.  Determines DMA/UDMA capability from the chip
 *	revision, programs the CD-ROM/FIFO and "microsoft register"
 *	control bytes, optionally forces compat mode via a device
 *	property, installs the rev-0xC3 reset-bug workaround, and maps
 *	each enabled channel.
 */
static void
acer_chip_map(struct pciide_softc *sc, const struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	int channel;
	pcireg_t cr, interface;
	pcireg_t rev = PCI_REVISION(pa->pa_class);
	struct aceride_softc *acer_sc = (struct aceride_softc *)sc;

	if (pciide_chipen(sc, pa) == 0)
		return;

	aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
	    "bus-master DMA support present");
	pciide_mapreg_dma(sc, pa);
	aprint_verbose("\n");
	sc->sc_wdcdev.sc_atac.atac_cap = ATAC_CAP_DATA16 | ATAC_CAP_DATA32;

	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_DMA;
		/* UDMA appeared at rev 0x20; mode ceiling rises with rev. */
		if (rev >= 0x20) {
			sc->sc_wdcdev.sc_atac.atac_cap |= ATAC_CAP_UDMA;
			if (rev >= 0xC7)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 6;
			else if (rev >= 0xC4)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 5;
			else if (rev >= 0xC2)
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 4;
			else
				sc->sc_wdcdev.sc_atac.atac_udma_cap = 2;
		}
		sc->sc_wdcdev.irqack = pciide_irqack;
		if (rev <= 0xc4) {
			sc->sc_wdcdev.dma_init = acer_dma_init;
			aprint_verbose_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "using PIO transfers above 137GB as workaround "
			    "for 48bit DMA access bug, expect reduced "
			    "performance\n");
		}
	}

	sc->sc_wdcdev.sc_atac.atac_pio_cap = 4;
	sc->sc_wdcdev.sc_atac.atac_dma_cap = 2;
	sc->sc_wdcdev.sc_atac.atac_set_modes = acer_setup_channel;
	sc->sc_wdcdev.sc_atac.atac_channels = sc->wdc_chanarray;
	sc->sc_wdcdev.sc_atac.atac_nchannels = PCIIDE_NUM_CHANNELS;
	sc->sc_wdcdev.wdc_maxdrives = 2;

	/* Enable CD-ROM DMA and keep the FIFO enabled. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CDRC,
	    (pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CDRC) |
		ACER_CDRC_DMA_EN) & ~ACER_CDRC_FIFO_DISABLE);

	/* Enable "microsoft register bits" R/W. */
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR3,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR3) |
	    ACER_CCAR3_PI);
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR1,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR1) &
	    ~(ACER_CHANSTATUS_RO|PCIIDE_CHAN_RO(0)|PCIIDE_CHAN_RO(1)));
	pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_CCAR2,
	    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_CCAR2) &
	    ~ACER_CHANSTATUSREGS_RO);
	cr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG);
	cr |= (PCIIDE_CHANSTATUS_EN << PCI_INTERFACE_SHIFT);
	{
		/*
		 * some BIOSes (port-cats ABLE) enable native mode, but don't
		 * setup everything correctly, so allow the forcing of
		 * compat mode
		 */
		bool force_compat_mode;
		bool property_is_set;
		property_is_set = prop_dictionary_get_bool(
		    device_properties(sc->sc_wdcdev.sc_atac.atac_dev),
		    "ali1543-ide-force-compat-mode",
		    &force_compat_mode);
		if (property_is_set && force_compat_mode) {
			cr &= ~((PCIIDE_INTERFACE_PCI(0)
			    | PCIIDE_INTERFACE_PCI(1))
			    << PCI_INTERFACE_SHIFT);
		}
	}
	pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_CLASS_REG, cr);
	/* Don't use cr, re-read the real register content instead */
	interface = PCI_INTERFACE(pci_conf_read(sc->sc_pc, sc->sc_tag,
	    PCI_CLASS_REG));

	/* From linux: enable "Cable Detection" */
	if (rev >= 0xC2) {
		pciide_pci_write(sc->sc_pc, sc->sc_tag, ACER_0x4B,
		    pciide_pci_read(sc->sc_pc, sc->sc_tag, ACER_0x4B)
		    | ACER_0x4B_CDETECT);
	}

	wdc_allocate_regs(&sc->sc_wdcdev);
	if (rev == 0xC3) {
		/* install reset bug workaround */
		if (pci_find_device(&acer_sc->pcib_pa, acer_pcib_match) == 0) {
			aprint_error_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "WARNING: can't find pci-isa bridge\n");
		} else
			sc->sc_wdcdev.reset = acer_do_reset;
	}

	for (channel = 0; channel < sc->sc_wdcdev.sc_atac.atac_nchannels;
	     channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		if ((interface & PCIIDE_CHAN_EN(channel)) == 0) {
			aprint_normal_dev(sc->sc_wdcdev.sc_atac.atac_dev,
			    "%s channel ignored (disabled)\n", cp->name);
			cp->ata_channel.ch_flags |= ATACH_DISABLED;
			continue;
		}
		/* newer controllers seems to lack the ACER_CHIDS. Sigh */
		pciide_mapchan(pa, cp, interface,
		    (rev >= 0xC2) ? pciide_pci_intr : acer_pci_intr);
	}
}
/*
 * iop_pci_attach --
 *	Attach an I2O IOP over PCI: find the first memory BAR, map it,
 *	map the separate message window on the DPT 2005S (other parts
 *	share the first window), enable bus mastering, hook up the
 *	interrupt, and hand off to the bus-independent iop(4) layer.
 */
static void
iop_pci_attach(device_t parent, device_t self, void *aux)
{
	struct pci_attach_args *pa;
	struct iop_softc *sc;
	pci_chipset_tag_t pc;
	pci_intr_handle_t ih;
	const char *intrstr;
	pcireg_t reg;
	int i;
	char intrbuf[PCI_INTRSTR_LEN];

	sc = device_private(self);
	sc->sc_dev = self;
	pa = aux;
	pc = pa->pa_pc;
	printf(": ");

	/*
	 * The kernel always uses the first memory mapping to communicate
	 * with the IOP.
	 */
	for (i = PCI_MAPREG_START; i < PCI_MAPREG_END; i += 4) {
		reg = pci_conf_read(pc, pa->pa_tag, i);
		if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_MEM) {
			sc->sc_memaddr = PCI_MAPREG_MEM_ADDR(reg);
			break;
		}
	}
	if (i == PCI_MAPREG_END) {
		printf("can't find mapping\n");
		return;
	}

	/* Map the register window. */
	if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_MEM, 0, &sc->sc_iot,
	    &sc->sc_ioh, NULL, NULL)) {
		aprint_error_dev(self, "can't map register window\n");
		return;
	}

	/* Map the 2nd register window. */
	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_DPT &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_DPT_RAID_2005S) {
		i += 4;	/* next BAR */
		if (i == PCI_MAPREG_END) {
			printf("can't find mapping\n");
			return;
		}

#if 0
		/* Should we check it? (see FreeBSD's asr driver) */
		reg = pci_conf_read(pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
		printf("subid %x, %x\n", PCI_VENDOR(reg), PCI_PRODUCT(reg));
#endif
		if (pci_mapreg_map(pa, i, PCI_MAPREG_TYPE_MEM, 0,
		    &sc->sc_msg_iot, &sc->sc_msg_ioh, NULL, NULL)) {
			aprint_error_dev(self,
			    "can't map 2nd register window\n");
			return;
		}
	} else {
		/* iop devices other than 2005S */
		sc->sc_msg_iot = sc->sc_iot;
		sc->sc_msg_ioh = sc->sc_ioh;
	}

	sc->sc_pcibus = pa->pa_bus;
	sc->sc_pcidev = pa->pa_device;
	sc->sc_dmat = pa->pa_dmat;
	sc->sc_bus_memt = pa->pa_memt;
	sc->sc_bus_iot = pa->pa_iot;

	/* Enable the device. */
	reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG,
	    reg | PCI_COMMAND_MASTER_ENABLE);

	/* Map and establish the interrupt.. */
	if (pci_intr_map(pa, &ih)) {
		printf("can't map interrupt\n");
		return;
	}
	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_BIO, iop_intr, sc);
	if (sc->sc_ih == NULL) {
		printf("can't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		return;
	}

	/* Attach to the bus-independent code. */
	iop_init(sc, intrstr);
}
/*
 * yds_attach --
 *	Attach a Yamaha DS-1 (YMF7xx) PCI audio controller: map the
 *	registers, establish the interrupt, disable legacy mode, enable
 *	the device, initialize the chip, detect the primary (and the
 *	optional secondary) AC97 codec, attach the codec(s) and the
 *	audio layer, and defer legacy OPL/MPU configuration until after
 *	ISA devices have attached.
 */
static void
yds_attach(device_t parent, device_t self, void *aux)
{
	struct yds_softc *sc;
	struct pci_attach_args *pa;
	pci_chipset_tag_t pc;
	char const *intrstr;
	pci_intr_handle_t ih;
	pcireg_t reg;
	struct yds_codec_softc *codec;
	int i, r, to;
	int revision;
	int ac97_id2;
	char intrbuf[PCI_INTRSTR_LEN];

	sc = device_private(self);
	sc->sc_dev = self;
	pa = (struct pci_attach_args *)aux;
	pc = pa->pa_pc;
	revision = PCI_REVISION(pa->pa_class);

	pci_aprint_devinfo(pa, NULL);

	/* Map register to memory */
	if (pci_mapreg_map(pa, YDS_PCI_MBA, PCI_MAPREG_TYPE_MEM, 0,
	    &sc->memt, &sc->memh, NULL, NULL)) {
		aprint_error_dev(self, "can't map memory space\n");
		return;
	}

	/* Map and establish the interrupt. */
	if (pci_intr_map(pa, &ih)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		return;
	}

	mutex_init(&sc->sc_lock, MUTEX_DEFAULT, IPL_AUDIO); /* XXX IPL_NONE? */
	mutex_init(&sc->sc_intr_lock, MUTEX_DEFAULT, IPL_AUDIO);

	intrstr = pci_intr_string(pc, ih, intrbuf, sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_AUDIO, yds_intr, sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt");
		if (intrstr != NULL)
			aprint_error(" at %s", intrstr);
		aprint_error("\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	sc->sc_dmatag = pa->pa_dmat;
	sc->sc_pc = pc;
	sc->sc_pcitag = pa->pa_tag;
	sc->sc_id = pa->pa_id;
	sc->sc_revision = revision;
	/* Capability flags are derived from the PCI product ID. */
	sc->sc_flags = yds_get_dstype(sc->sc_id);
#ifdef AUDIO_DEBUG
	if (ydsdebug) {
		char bits[80];

		snprintb(bits, sizeof(bits), YDS_CAP_BITS, sc->sc_flags);
		printf("%s: chip has %s\n", device_xname(self), bits);
	}
#endif

	/* Disable legacy mode */
	reg = pci_conf_read(pc, pa->pa_tag, YDS_PCI_LEGACY);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_LEGACY,
	    reg & YDS_PCI_LEGACY_LAD);

	/* Enable the device. */
	reg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);
	reg |= (PCI_COMMAND_IO_ENABLE | PCI_COMMAND_MEM_ENABLE |
	    PCI_COMMAND_MASTER_ENABLE);
	pci_conf_write(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg);
	reg = pci_conf_read(pc, pa->pa_tag, PCI_COMMAND_STATUS_REG);

	/* Mute all volumes */
	for (i = 0x80; i < 0xc0; i += 2)
		YWRITE2(sc, i, 0);

	/* Initialize the device */
	if (yds_init(sc)) {
		aprint_error_dev(self, "initialize failed\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	/*
	 * Detect primary/secondary AC97
	 * YMF754 Hardware Specification Rev 1.01 page 24
	 */
	reg = pci_conf_read(pc, pa->pa_tag, YDS_PCI_DSCTRL);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_DSCTRL,
	    reg & ~YDS_DSCTRL_CRST);
	delay(400000);		/* Needed for 740C. */

	/* Primary */
	for (to = 0; to < AC97_TIMEOUT; to++) {
		if ((YREAD2(sc, AC97_STAT_ADDR1) & AC97_BUSY) == 0)
			break;
		delay(1);
	}
	if (to == AC97_TIMEOUT) {
		aprint_error_dev(self, "no AC97 available\n");
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	/* Secondary */
	/* Secondary AC97 is used for 4ch audio. Currently unused. */
	ac97_id2 = -1;
	/* Without docking activity there is no secondary codec to probe. */
	if ((YREAD2(sc, YDS_ACTIVITY) & YDS_ACTIVITY_DOCKA) == 0)
		goto detected;
#if 0	/* reset secondary... */
	YWRITE2(sc, YDS_GPIO_OCTRL,
	    YREAD2(sc, YDS_GPIO_OCTRL) & ~YDS_GPIO_GPO2);
	YWRITE2(sc, YDS_GPIO_FUNCE,
	    (YREAD2(sc, YDS_GPIO_FUNCE)&(~YDS_GPIO_GPC2))|YDS_GPIO_GPE2);
#endif
	for (to = 0; to < AC97_TIMEOUT; to++) {
		if ((YREAD2(sc, AC97_STAT_ADDR2) & AC97_BUSY) == 0)
			break;
		delay(1);
	}
	if (to < AC97_TIMEOUT) {
		/* detect id; try codec addresses 1..3 until one answers */
		for (ac97_id2 = 1; ac97_id2 < 4; ac97_id2++) {
			YWRITE2(sc, AC97_CMD_ADDR,
			    AC97_CMD_READ | AC97_ID(ac97_id2) | 0x28);

			for (to = 0; to < AC97_TIMEOUT; to++) {
				if ((YREAD2(sc, AC97_STAT_ADDR2) & AC97_BUSY)
				    == 0)
					goto detected;
				delay(1);
			}
		}
		if (ac97_id2 == 4)
			ac97_id2 = -1;
detected:
		;
	}

	/* Pulse the codec cold-reset line and wait for the primary again. */
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_DSCTRL,
	    reg | YDS_DSCTRL_CRST);
	delay (20);
	pci_conf_write(pc, pa->pa_tag, YDS_PCI_DSCTRL,
	    reg & ~YDS_DSCTRL_CRST);
	delay (400000);
	for (to = 0; to < AC97_TIMEOUT; to++) {
		if ((YREAD2(sc, AC97_STAT_ADDR1) & AC97_BUSY) == 0)
			break;
		delay(1);
	}

	/*
	 * Attach ac97 codec
	 */
	for (i = 0; i < 2; i++) {
		static struct {
			int data;
			int addr;
		} statregs[] = {
			{AC97_STAT_DATA1, AC97_STAT_ADDR1},
			{AC97_STAT_DATA2, AC97_STAT_ADDR2},
		};

		if (i == 1 && ac97_id2 == -1)
			break;		/* secondary ac97 not available */

		codec = &sc->sc_codec[i];
		codec->sc = sc;
		codec->id = i == 1 ? ac97_id2 : 0;
		codec->status_data = statregs[i].data;
		codec->status_addr = statregs[i].addr;
		codec->host_if.arg = codec;
		codec->host_if.attach = yds_attach_codec;
		codec->host_if.read = yds_read_codec;
		codec->host_if.write = yds_write_codec;
		codec->host_if.reset = yds_reset_codec;

		r = ac97_attach(&codec->host_if, self, &sc->sc_lock);
		if (r != 0) {
			aprint_error_dev(self,
			    "can't attach codec (error 0x%X)\n", r);
			mutex_destroy(&sc->sc_lock);
			mutex_destroy(&sc->sc_intr_lock);
			return;
		}
	}

	if (0 != auconv_create_encodings(yds_formats, YDS_NFORMATS,
	    &sc->sc_encodings)) {
		mutex_destroy(&sc->sc_lock);
		mutex_destroy(&sc->sc_intr_lock);
		return;
	}

	audio_attach_mi(&yds_hw_if, sc, self);

	sc->sc_legacy_iot = pa->pa_iot;
	/* Legacy OPL/MPU setup must wait until ISA attach is complete. */
	config_defer(self, yds_configure_legacy);

	if (!pmf_device_register(self, yds_suspend, yds_resume))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
void ral_pci_attach(device_t parent, device_t self, void *aux) { struct ral_pci_softc *psc = device_private(self); struct rt2560_softc *sc = &psc->sc_sc; const struct pci_attach_args *pa = aux; const char *intrstr; bus_addr_t base; pci_intr_handle_t ih; pcireg_t memtype, reg; int error; char intrbuf[PCI_INTRSTR_LEN]; pci_aprint_devinfo(pa, NULL); if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_RALINK) { switch (PCI_PRODUCT(pa->pa_id)) { case PCI_PRODUCT_RALINK_RT2560: psc->sc_opns = &ral_rt2560_opns; break; case PCI_PRODUCT_RALINK_RT2561: case PCI_PRODUCT_RALINK_RT2561S: case PCI_PRODUCT_RALINK_RT2661: psc->sc_opns = &ral_rt2661_opns; break; default: psc->sc_opns = &ral_rt2860_opns; break; } } else { /* all other vendors are RT2860 only */ psc->sc_opns = &ral_rt2860_opns; } sc->sc_dev = self; sc->sc_dmat = pa->pa_dmat; psc->sc_pc = pa->pa_pc; /* enable the appropriate bits in the PCI CSR */ reg = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG); reg |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_MEM_ENABLE; pci_conf_write(pa->pa_pc, pa->pa_tag, PCI_COMMAND_STATUS_REG, reg); /* map control/status registers */ memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, RAL_PCI_BAR0); error = pci_mapreg_map(pa, RAL_PCI_BAR0, memtype, 0, &sc->sc_st, &sc->sc_sh, &base, &psc->sc_mapsize); if (error != 0) { aprint_error(": could not map memory space\n"); return; } if (pci_intr_map(pa, &ih) != 0) { aprint_error(": could not map interrupt\n"); return; } intrstr = pci_intr_string(psc->sc_pc, ih, intrbuf, sizeof(intrbuf)); psc->sc_ih = pci_intr_establish(psc->sc_pc, ih, IPL_NET, psc->sc_opns->intr, sc); if (psc->sc_ih == NULL) { aprint_error(": could not establish interrupt"); if (intrstr != NULL) aprint_error(" at %s", intrstr); aprint_error("\n"); return; } aprint_normal_dev(sc->sc_dev, "interrupting at %s\n", intrstr); (*psc->sc_opns->attach)(sc, PCI_PRODUCT(pa->pa_id)); }
static void hdaudio_pci_attach(device_t parent, device_t self, void *opaque) { struct hdaudio_pci_softc *sc = device_private(self); struct pci_attach_args *pa = opaque; pci_intr_handle_t ih; const char *intrstr; pcireg_t csr; int err; aprint_naive("\n"); aprint_normal(": HD Audio Controller\n"); sc->sc_pc = pa->pa_pc; sc->sc_tag = pa->pa_tag; sc->sc_id = pa->pa_id; sc->sc_hdaudio.sc_subsystem = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_SUBSYS_ID_REG); /* Enable busmastering and MMIO access */ csr = pci_conf_read(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG); csr |= PCI_COMMAND_MASTER_ENABLE | PCI_COMMAND_BACKTOBACK_ENABLE; pci_conf_write(sc->sc_pc, sc->sc_tag, PCI_COMMAND_STATUS_REG, csr); /* Map MMIO registers */ err = pci_mapreg_map(pa, HDAUDIO_PCI_AZBARL, PCI_MAPREG_TYPE_MEM, 0, &sc->sc_hdaudio.sc_memt, &sc->sc_hdaudio.sc_memh, &sc->sc_hdaudio.sc_membase, &sc->sc_hdaudio.sc_memsize); if (err) { aprint_error_dev(self, "couldn't map mmio space\n"); return; } sc->sc_hdaudio.sc_memvalid = true; sc->sc_hdaudio.sc_dmat = pa->pa_dmat; /* Map interrupt and establish handler */ err = pci_intr_map(pa, &ih); if (err) { aprint_error_dev(self, "couldn't map interrupt\n"); return; } intrstr = pci_intr_string(pa->pa_pc, ih); sc->sc_ih = pci_intr_establish(pa->pa_pc, ih, IPL_AUDIO, hdaudio_pci_intr, sc); if (sc->sc_ih == NULL) { aprint_error_dev(self, "couldn't establish interrupt"); if (intrstr) aprint_error(" at %s", intrstr); aprint_error("\n"); return; } aprint_normal_dev(self, "interrupting at %s\n", intrstr); if (!pmf_device_register(self, NULL, hdaudio_pci_resume)) aprint_error_dev(self, "couldn't establish power handler\n"); hdaudio_pci_reinit(sc); /* Attach bus-independent HD audio layer */ hdaudio_attach(self, &sc->sc_hdaudio); }
va_end(arg); } /* Initialize the pci-pci bridges and bus hierarchy. */ /* let rec */ static void pci_businit (int port, int bus, pci_flags_t flags); static void pci_businit_dev_func (pcitag_t tag, pci_flags_t flags) { pcireg_t id, class, bhlc; class = pci_conf_read(tag, PCI_CLASS_REG); id = pci_conf_read(tag, PCI_ID_REG); bhlc = pci_conf_read(tag, PCI_BHLC_REG); pcindev++; if (PCI_CLASS(class) == PCI_CLASS_BRIDGE && PCI_HDRTYPE_TYPE(bhlc) == 1) { enum {NONE, PCI, LDT} sec_type; int offset; int port, bus, device, function; int bus2; struct pci_bus *ppri, *psec; pcireg_t data; sec_type = NONE; offset = 0; switch (PCI_SUBCLASS(class)) {
/*
 * This routine is called after all the ISA devices are configured,
 * to avoid conflict.
 *
 * Probes the legacy OPL (FM synth) and MPU (MIDI) port addresses: on
 * SELECTABLE chips the address index is programmed through the LEGACY
 * config register, on FLEXIBLE chips the base address registers are
 * written directly.  Each function found is attached as an audio child
 * device; whatever is not found has its enable bit cleared again.
 */
static void
yds_configure_legacy(struct yds_softc *sc)
#define FLEXIBLE	(sc->sc_flags & YDS_CAP_LEGACY_FLEXIBLE)
#define SELECTABLE	(sc->sc_flags & YDS_CAP_LEGACY_SELECTABLE)
{
	pcireg_t reg;
	struct device *dev;
	int i;
	bus_addr_t opl_addrs[] = {0x388, 0x398, 0x3A0, 0x3A8};
	bus_addr_t mpu_addrs[] = {0x330, 0x300, 0x332, 0x334};

	if (!FLEXIBLE && !SELECTABLE)
		return;

	reg = pci_conf_read(sc->sc_pc, sc->sc_pcitag, YDS_PCI_LEGACY);
	reg &= ~0x8133c03f;	/* these bits are out of interest */
	reg |= (YDS_PCI_EX_LEGACY_IMOD | YDS_PCI_LEGACY_FMEN |
	    YDS_PCI_LEGACY_MEN /*| YDS_PCI_LEGACY_MIEN*/);
	if (sc->sc_flags & YDS_CAP_LEGACY_SMOD_DISABLE)
		reg |= YDS_PCI_EX_LEGACY_SMOD_DISABLE;
	if (FLEXIBLE) {
		pci_conf_write(sc->sc_pc, sc->sc_pcitag, YDS_PCI_LEGACY, reg);
		delay(100*1000);
	}

	/* Look for OPL */
	dev = 0;
	for (i = 0; i < sizeof(opl_addrs) / sizeof (bus_addr_t); i++) {
		if (SELECTABLE) {
			/* Address index lives in bits 16.. of LEGACY. */
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    YDS_PCI_LEGACY, reg | (i << (0+16)));
			delay(100*1000);	/* wait 100ms */
		} else
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    YDS_PCI_FM_BA, opl_addrs[i]);
		if (bus_space_map(sc->sc_opl_iot, opl_addrs[i], 4, 0,
		    &sc->sc_opl_ioh) == 0) {
			struct audio_attach_args aa;

			aa.type = AUDIODEV_TYPE_OPL;
			aa.hwif = aa.hdl = NULL;
			dev = config_found(&sc->sc_dev, &aa, audioprint);
			if (dev == 0)
				/* Child didn't attach; release the ports. */
				bus_space_unmap(sc->sc_opl_iot,
				    sc->sc_opl_ioh, 4);
			else {
				if (SELECTABLE)
					reg |= (i << (0+16));
				break;
			}
		}
	}
	if (dev == 0) {
		reg &= ~YDS_PCI_LEGACY_FMEN;
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    YDS_PCI_LEGACY, reg);
	} else {
		/* Max. volume */
		YWRITE4(sc, YDS_LEGACY_OUT_VOLUME, 0x3fff3fff);
		YWRITE4(sc, YDS_LEGACY_REC_VOLUME, 0x3fff3fff);
	}

	/* Look for MPU */
	dev = 0;
	for (i = 0; i < sizeof(mpu_addrs) / sizeof (bus_addr_t); i++) {
		if (SELECTABLE)
			/* MPU address index lives in bits 20.. of LEGACY. */
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    YDS_PCI_LEGACY, reg | (i << (4+16)));
		else
			pci_conf_write(sc->sc_pc, sc->sc_pcitag,
			    YDS_PCI_MPU_BA, mpu_addrs[i]);
		if (bus_space_map(sc->sc_mpu_iot, mpu_addrs[i], 2, 0,
		    &sc->sc_mpu_ioh) == 0) {
			struct audio_attach_args aa;

			aa.type = AUDIODEV_TYPE_MPU;
			aa.hwif = aa.hdl = NULL;
			dev = config_found(&sc->sc_dev, &aa, audioprint);
			if (dev == 0)
				bus_space_unmap(sc->sc_mpu_iot,
				    sc->sc_mpu_ioh, 2);
			else {
				if (SELECTABLE)
					reg |= (i << (4+16));
				break;
			}
		}
	}
	if (dev == 0) {
		reg &= ~(YDS_PCI_LEGACY_MEN | YDS_PCI_LEGACY_MIEN);
		pci_conf_write(sc->sc_pc, sc->sc_pcitag,
		    YDS_PCI_LEGACY, reg);
	}
	sc->sc_mpu = dev;
}
void ppbattach(struct device *parent, struct device *self, void *aux) { struct ppb_softc *sc = (struct ppb_softc *)self; struct pci_attach_args *pa = aux; pci_chipset_tag_t pc = pa->pa_pc; struct pcibus_attach_args pba; pci_intr_handle_t ih; pcireg_t busdata, reg, blr; char *name; int pin; sc->sc_pc = pc; sc->sc_tag = pa->pa_tag; busdata = pci_conf_read(pc, pa->pa_tag, PPB_REG_BUSINFO); if (PPB_BUSINFO_SECONDARY(busdata) == 0) { printf(": not configured by system firmware\n"); return; } #if 0 /* * XXX can't do this, because we're not given our bus number * (we shouldn't need it), and because we've no way to * decompose our tag. */ /* sanity check. */ if (pa->pa_bus != PPB_BUSINFO_PRIMARY(busdata)) panic("ppbattach: bus in tag (%d) != bus in reg (%d)", pa->pa_bus, PPB_BUSINFO_PRIMARY(busdata)); #endif /* Check for PCI Express capabilities and setup hotplug support. */ if (pci_get_capability(pc, pa->pa_tag, PCI_CAP_PCIEXPRESS, &sc->sc_cap_off, ®) && (reg & PCI_PCIE_XCAP_SI)) { #ifdef __i386__ if (pci_intr_map(pa, &ih) == 0) sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_BIO, ppb_intr, sc, self->dv_xname); #else if (pci_intr_map_msi(pa, &ih) == 0 || pci_intr_map(pa, &ih) == 0) sc->sc_intrhand = pci_intr_establish(pc, ih, IPL_BIO, ppb_intr, sc, self->dv_xname); #endif if (sc->sc_intrhand) { printf(": %s", pci_intr_string(pc, ih)); /* Enable hotplug interrupt. 
*/ reg = pci_conf_read(pc, pa->pa_tag, sc->sc_cap_off + PCI_PCIE_SLCSR); reg |= (PCI_PCIE_SLCSR_HPE | PCI_PCIE_SLCSR_PDE); pci_conf_write(pc, pa->pa_tag, sc->sc_cap_off + PCI_PCIE_SLCSR, reg); timeout_set(&sc->sc_to, ppb_hotplug_insert_finish, sc); } } printf("\n"); if (PCI_VENDOR(pa->pa_id) != PCI_VENDOR_INTEL || (PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INTEL_82801BA_HPB && PCI_PRODUCT(pa->pa_id) != PCI_PRODUCT_INTEL_82801BAM_HPB)) ppb_alloc_resources(sc, pa); for (pin = PCI_INTERRUPT_PIN_A; pin <= PCI_INTERRUPT_PIN_D; pin++) { pa->pa_intrpin = pa->pa_rawintrpin = pin; pa->pa_intrline = 0; pci_intr_map(pa, &sc->sc_ih[pin - PCI_INTERRUPT_PIN_A]); } /* * The UltraSPARC-IIi APB doesn't implement the standard * address range registers. */ if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN && PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_SIMBA) goto attach; /* Figure out the I/O address range of the bridge. */ blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_IOSTATUS); sc->sc_iobase = (blr & 0x000000f0) << 8; sc->sc_iolimit = (blr & 0x000f000) | 0x00000fff; blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_IO_HI); sc->sc_iobase |= (blr & 0x0000ffff) << 16; sc->sc_iolimit |= (blr & 0xffff0000); if (sc->sc_iolimit > sc->sc_iobase) { name = malloc(32, M_DEVBUF, M_NOWAIT); if (name) { snprintf(name, 32, "%s pciio", sc->sc_dev.dv_xname); sc->sc_ioex = extent_create(name, 0, 0xffffffff, M_DEVBUF, NULL, 0, EX_NOWAIT | EX_FILLED); extent_free(sc->sc_ioex, sc->sc_iobase, sc->sc_iolimit - sc->sc_iobase + 1, EX_NOWAIT); } } /* Figure out the memory mapped I/O address range of the bridge. 
*/ blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_MEM); sc->sc_membase = (blr & 0x0000fff0) << 16; sc->sc_memlimit = (blr & 0xfff00000) | 0x000fffff; if (sc->sc_memlimit > sc->sc_membase) { name = malloc(32, M_DEVBUF, M_NOWAIT); if (name) { snprintf(name, 32, "%s pcimem", sc->sc_dev.dv_xname); sc->sc_memex = extent_create(name, 0, 0xffffffff, M_DEVBUF, NULL, 0, EX_NOWAIT | EX_FILLED); extent_free(sc->sc_memex, sc->sc_membase, sc->sc_memlimit - sc->sc_membase + 1, EX_NOWAIT); } } /* Figure out the prefetchable MMI/O address range of the bridge. */ blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFMEM); sc->sc_pmembase = (blr & 0x0000fff0) << 16; sc->sc_pmemlimit = (blr & 0xfff00000) | 0x000fffff; #ifdef __LP64__ /* XXX because extents use long... */ blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFBASE_HI32); sc->sc_pmembase |= ((uint64_t)blr) << 32; blr = pci_conf_read(pc, pa->pa_tag, PPB_REG_PREFLIM_HI32); sc->sc_pmemlimit |= ((uint64_t)blr) << 32; #endif if (sc->sc_pmemlimit > sc->sc_pmembase) { name = malloc(32, M_DEVBUF, M_NOWAIT); if (name) { snprintf(name, 32, "%s pcipmem", sc->sc_dev.dv_xname); sc->sc_pmemex = extent_create(name, 0, (u_long)-1L, M_DEVBUF, NULL, 0, EX_NOWAIT | EX_FILLED); extent_free(sc->sc_pmemex, sc->sc_pmembase, sc->sc_pmemlimit - sc->sc_pmembase + 1, EX_NOWAIT); } } /* * The Intel 82801BAM Hub-to-PCI can decode subtractively. * XXX We probably should handle subtractive decode bridges * in general. */ if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_INTEL && (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82801BA_HPB || PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_INTEL_82801BAM_HPB)) { if (sc->sc_ioex == NULL) sc->sc_ioex = pa->pa_ioex; if (sc->sc_memex == NULL) sc->sc_memex = pa->pa_memex; } attach: /* * Attach the PCI bus that hangs off of it. * * XXX Don't pass-through Memory Read Multiple. Should we? * XXX Consult the spec... 
*/ bzero(&pba, sizeof(pba)); pba.pba_busname = "pci"; pba.pba_iot = pa->pa_iot; pba.pba_memt = pa->pa_memt; pba.pba_dmat = pa->pa_dmat; pba.pba_pc = pc; pba.pba_flags = pa->pa_flags & ~PCI_FLAGS_MRM_OKAY; pba.pba_ioex = sc->sc_ioex; pba.pba_memex = sc->sc_memex; pba.pba_pmemex = sc->sc_pmemex; pba.pba_domain = pa->pa_domain; pba.pba_bus = PPB_BUSINFO_SECONDARY(busdata); pba.pba_bridgeih = sc->sc_ih; pba.pba_bridgetag = &sc->sc_tag; pba.pba_intrswiz = pa->pa_intrswiz; pba.pba_intrtag = pa->pa_intrtag; sc->sc_psc = config_found(self, &pba, ppbprint); }
/*
 * Attach glue for the Realtek RTS52xx/RTL84xx PCI card reader.
 * Maps the register BAR, hooks up the interrupt, enables bus
 * mastering, powers the device to D0 and hands off to rtsx_attach().
 *
 * Fix: the original returned from later failure paths without
 * releasing earlier acquisitions, leaking the mapped register window,
 * the allocated interrupt handle, and the established interrupt
 * handler.  Use goto-based unwinding instead.
 */
static void
rtsx_pci_attach(device_t parent, device_t self, void *aux)
{
	struct rtsx_pci_softc *sc = device_private(self);
	struct pci_attach_args *pa = (struct pci_attach_args *)aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pcitag_t tag = pa->pa_tag;
	pcireg_t reg;
	char const *intrstr;
	bus_space_tag_t iot;
	bus_space_handle_t ioh;
	bus_size_t size;
	uint32_t flags;
	char intrbuf[PCI_INTRSTR_LEN];

	sc->sc.sc_dev = self;
	sc->sc_pc = pc;

	pci_aprint_devinfo(pa, NULL);

	if ((pci_conf_read(pc, tag, RTSX_CFG_PCI) & RTSX_CFG_ASIC) != 0) {
		aprint_error_dev(self, "no asic\n");
		return;
	}

	if (pci_mapreg_map(pa, RTSX_PCI_BAR, PCI_MAPREG_TYPE_MEM, 0,
	    &iot, &ioh, NULL, &size)) {
		aprint_error_dev(self, "couldn't map registers\n");
		return;
	}

	if (pci_intr_alloc(pa, &sc->sc_pihp, NULL, 0)) {
		aprint_error_dev(self, "couldn't map interrupt\n");
		goto unmap;
	}
	intrstr = pci_intr_string(pc, sc->sc_pihp[0], intrbuf,
	    sizeof(intrbuf));
	sc->sc_ih = pci_intr_establish(pc, sc->sc_pihp[0], IPL_SDMMC,
	    rtsx_intr, &sc->sc);
	if (sc->sc_ih == NULL) {
		aprint_error_dev(self, "couldn't establish interrupt\n");
		goto release_intr;
	}
	aprint_normal_dev(self, "interrupting at %s\n", intrstr);

	/* Enable the device */
	reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	reg |= PCI_COMMAND_MASTER_ENABLE;
	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, reg);

	/* Power up the device */
	pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D0);

	/* Per-chip quirk flags handed to the MI layer. */
	switch (PCI_PRODUCT(pa->pa_id)) {
	case PCI_PRODUCT_REALTEK_RTS5209:
		flags = RTSX_F_5209;
		break;
	case PCI_PRODUCT_REALTEK_RTS5227:
		flags = RTSX_F_5227;
		break;
	case PCI_PRODUCT_REALTEK_RTS5229:
		flags = RTSX_F_5229;
		break;
	case PCI_PRODUCT_REALTEK_RTL8402:
		flags = RTSX_F_8402;
		break;
	case PCI_PRODUCT_REALTEK_RTL8411:
		flags = RTSX_F_8411;
		break;
	case PCI_PRODUCT_REALTEK_RTL8411B:
		flags = RTSX_F_8411B;
		break;
	default:
		flags = 0;
		break;
	}

	if (rtsx_attach(&sc->sc, iot, ioh, size, pa->pa_dmat, flags) != 0) {
		aprint_error_dev(self, "couldn't initialize chip\n");
		goto disestablish;
	}

	if (!pmf_device_register1(self, rtsx_suspend, rtsx_resume,
	    rtsx_shutdown))
		aprint_error_dev(self, "couldn't establish powerhook\n");
	return;

disestablish:
	pci_intr_disestablish(pc, sc->sc_ih);
	sc->sc_ih = NULL;
release_intr:
	pci_intr_release(pc, sc->sc_pihp, 1);
unmap:
	bus_space_unmap(iot, ioh, size);
}
int ppbactivate(struct device *self, int act) { struct ppb_softc *sc = (void *)self; pci_chipset_tag_t pc = sc->sc_pc; pcitag_t tag = sc->sc_tag; pcireg_t blr, reg; int off, rv = 0; switch (act) { case DVACT_QUIESCE: rv = config_activate_children(self, act); break; case DVACT_SUSPEND: rv = config_activate_children(self, act); /* Save registers that may get lost. */ sc->sc_csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); sc->sc_bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG); sc->sc_bir = pci_conf_read(pc, tag, PPB_REG_BUSINFO); sc->sc_bcr = pci_conf_read(pc, tag, PPB_REG_BRIDGECONTROL); sc->sc_int = pci_conf_read(pc, tag, PCI_INTERRUPT_REG); if (sc->sc_cap_off) sc->sc_slcsr = pci_conf_read(pc, tag, sc->sc_cap_off + PCI_PCIE_SLCSR); if (pci_get_capability(pc, tag, PCI_CAP_MSI, &off, ®)) { sc->sc_msi_ma = pci_conf_read(pc, tag, off + PCI_MSI_MA); if (reg & PCI_MSI_MC_C64) { sc->sc_msi_mau32 = pci_conf_read(pc, tag, off + PCI_MSI_MAU32); sc->sc_msi_md = pci_conf_read(pc, tag, off + PCI_MSI_MD64); } else { sc->sc_msi_md = pci_conf_read(pc, tag, off + PCI_MSI_MD32); } sc->sc_msi_mc = reg; } if (pci_dopm) { /* Place the bridge into D3. */ sc->sc_pmcsr_state = pci_get_powerstate(pc, tag); pci_set_powerstate(pc, tag, PCI_PMCSR_STATE_D3); } break; case DVACT_RESUME: if (pci_dopm) { /* Restore power. */ pci_set_powerstate(pc, tag, sc->sc_pmcsr_state); } /* Restore the registers saved above. */ pci_conf_write(pc, tag, PCI_BHLC_REG, sc->sc_bhlcr); pci_conf_write(pc, tag, PPB_REG_BUSINFO, sc->sc_bir); pci_conf_write(pc, tag, PPB_REG_BRIDGECONTROL, sc->sc_bcr); pci_conf_write(pc, tag, PCI_INTERRUPT_REG, sc->sc_int); if (sc->sc_cap_off) pci_conf_write(pc, tag, sc->sc_cap_off + PCI_PCIE_SLCSR, sc->sc_slcsr); /* Restore I/O window. 
*/ blr = pci_conf_read(pc, tag, PPB_REG_IOSTATUS); blr &= 0xffff0000; blr |= sc->sc_iolimit & PPB_IO_MASK; blr |= (sc->sc_iobase >> PPB_IO_SHIFT); pci_conf_write(pc, tag, PPB_REG_IOSTATUS, blr); blr = (sc->sc_iobase & 0xffff0000) >> 16; blr |= sc->sc_iolimit & 0xffff0000; pci_conf_write(pc, tag, PPB_REG_IO_HI, blr); /* Restore memory mapped I/O window. */ blr = sc->sc_memlimit & PPB_MEM_MASK; blr |= (sc->sc_membase >> PPB_MEM_SHIFT); pci_conf_write(pc, tag, PPB_REG_MEM, blr); /* Restore prefetchable MMI/O window. */ blr = sc->sc_pmemlimit & PPB_MEM_MASK; blr |= (sc->sc_pmembase >> PPB_MEM_SHIFT); pci_conf_write(pc, tag, PPB_REG_PREFMEM, blr); #ifdef __LP64__ pci_conf_write(pc, tag, PPB_REG_PREFBASE_HI32, sc->sc_pmembase >> 32); pci_conf_write(pc, tag, PPB_REG_PREFLIM_HI32, sc->sc_pmemlimit >> 32); #endif if (pci_get_capability(pc, tag, PCI_CAP_MSI, &off, ®)) { pci_conf_write(pc, tag, off + PCI_MSI_MA, sc->sc_msi_ma); if (reg & PCI_MSI_MC_C64) { pci_conf_write(pc, tag, off + PCI_MSI_MAU32, sc->sc_msi_mau32); pci_conf_write(pc, tag, off + PCI_MSI_MD64, sc->sc_msi_md); } else { pci_conf_write(pc, tag, off + PCI_MSI_MD32, sc->sc_msi_md); } pci_conf_write(pc, tag, off + PCI_MSI_MC, sc->sc_msi_mc); } /* * Restore command register last to avoid exposing * uninitialised windows. */ reg = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, (reg & 0xffff0000) | (sc->sc_csr & 0x0000ffff)); rv = config_activate_children(self, act); break; } return (rv); }
/*
 * Attach the ICH SMBus host controller and hang an i2c bus off it.
 * An interrupt handler is installed when the controller is not routed
 * to SMI and a PCI interrupt can be mapped; otherwise the driver runs
 * in polling mode.
 */
static void
ichsmb_attach(device_t parent, device_t self, void *aux)
{
	struct ichsmb_softc *sc = device_private(self);
	struct pci_attach_args *pa = aux;
	struct i2cbus_attach_args iba;
	pcireg_t hostc;
	bus_size_t mapsize;
	pci_intr_handle_t pih;
	const char *istr = NULL;
	char istrbuf[PCI_INTRSTR_LEN];

	sc->sc_dev = self;

	pci_aprint_devinfo(pa, NULL);

	/* Read configuration */
	hostc = pci_conf_read(pa->pa_pc, pa->pa_tag, LPCIB_SMB_HOSTC);
	DPRINTF(("%s: conf 0x%08x\n", device_xname(sc->sc_dev), hostc));

	/* Bail out if firmware left the host controller disabled. */
	if ((hostc & LPCIB_SMB_HOSTC_HSTEN) == 0) {
		aprint_error_dev(self, "SMBus disabled\n");
		goto out;
	}

	/* Map I/O space */
	if (pci_mapreg_map(pa, LPCIB_SMB_BASE, PCI_MAPREG_TYPE_IO, 0,
	    &sc->sc_iot, &sc->sc_ioh, NULL, &mapsize)) {
		aprint_error_dev(self, "can't map I/O space\n");
		goto out;
	}

	/* Assume polling until an interrupt handler is in place. */
	sc->sc_poll = 1;
	if (hostc & LPCIB_SMB_HOSTC_SMIEN) {
		/* No PCI IRQ */
		aprint_normal_dev(self, "interrupting at SMI\n");
	} else {
		/* Install interrupt handler */
		if (pci_intr_map(pa, &pih) == 0) {
			istr = pci_intr_string(pa->pa_pc, pih,
			    istrbuf, sizeof(istrbuf));
			sc->sc_ih = pci_intr_establish(pa->pa_pc, pih,
			    IPL_BIO, ichsmb_intr, sc);
			if (sc->sc_ih != NULL) {
				aprint_normal_dev(self,
				    "interrupting at %s\n", istr);
				sc->sc_poll = 0;
			}
		}
		if (sc->sc_poll)
			aprint_normal_dev(self, "polling\n");
	}

	/* Attach I2C bus */
	mutex_init(&sc->sc_i2c_mutex, MUTEX_DEFAULT, IPL_NONE);
	sc->sc_i2c_tag.ic_cookie = sc;
	sc->sc_i2c_tag.ic_acquire_bus = ichsmb_i2c_acquire_bus;
	sc->sc_i2c_tag.ic_release_bus = ichsmb_i2c_release_bus;
	sc->sc_i2c_tag.ic_exec = ichsmb_i2c_exec;

	memset(&iba, 0, sizeof(iba));
	iba.iba_type = I2C_TYPE_SMBUS;
	iba.iba_tag = &sc->sc_i2c_tag;
	config_found(self, &iba, iicbus_print);

out:
	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
/*
 * Survey the devices behind the bridge and, when the firmware left
 * the bridge's forwarding windows unconfigured, allocate address
 * space for them from the parent bus's extents and program the
 * bridge registers accordingly.
 *
 * NOTE(review): this chunk shows only the device survey and the I/O
 * window allocation; the function continues past the visible text.
 */
void
ppb_alloc_resources(struct ppb_softc *sc, struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcireg_t id, busdata, blr, bhlcr, type, csr;
	pcireg_t addr, mask;
	pcitag_t tag;
	int bus, dev;
	int reg, reg_start, reg_end, reg_rom;
	int io_count = 0;
	int mem_count = 0;
	bus_addr_t start, end;
	u_long base, size;

	/* Without a parent memory extent there is nothing to carve from. */
	if (pa->pa_memex == NULL)
		return;

	busdata = pci_conf_read(pc, sc->sc_tag, PPB_REG_BUSINFO);
	bus = PPB_BUSINFO_SECONDARY(busdata);
	if (bus == 0)
		return;

	/*
	 * Count number of devices.  If there are no devices behind
	 * this bridge, there's no point in allocating any address
	 * space.
	 */
	for (dev = 0; dev < pci_bus_maxdevs(pc, bus); dev++) {
		tag = pci_make_tag(pc, bus, dev, 0);
		id = pci_conf_read(pc, tag, PCI_ID_REG);
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID ||
		    PCI_VENDOR(id) == 0)
			continue;

		/* Pick the BAR range to scan based on the header type. */
		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			reg_rom = PCI_ROM_REG;
			break;
		case 1:	/* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			reg_rom = 0;	/* 0x38 */
			break;
		case 2:	/* PCI-Cardbus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			reg_rom = 0;
			break;
		default:
			return;
		}

		/* Tally how many I/O and memory BARs are implemented. */
		for (reg = reg_start; reg < reg_end; reg += 4) {
			if (pci_mapreg_probe(pc, tag, reg, &type) == 0)
				continue;

			if (type == PCI_MAPREG_TYPE_IO)
				io_count++;
			else
				mem_count++;
		}

		/* Size the expansion ROM by the usual write-ones probe. */
		if (reg_rom != 0) {
			addr = pci_conf_read(pc, tag, reg_rom);
			pci_conf_write(pc, tag, reg_rom, ~PCI_ROM_ENABLE);
			mask = pci_conf_read(pc, tag, reg_rom);
			pci_conf_write(pc, tag, reg_rom, addr);
			if (PCI_ROM_SIZE(mask))
				mem_count++;
		}
	}

	csr = pci_conf_read(pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);

	/*
	 * Get the bridge in a consistent state.  If memory mapped I/O
	 * is disabled, disable the associated windows as well.
	 */
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0) {
		pci_conf_write(pc, sc->sc_tag, PPB_REG_MEM, 0x0000ffff);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFMEM, 0x0000ffff);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFBASE_HI32, 0);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFLIM_HI32, 0);
	}

	/* Allocate I/O address space if necessary. */
	if (io_count > 0 && pa->pa_ioex) {
		blr = pci_conf_read(pc, sc->sc_tag, PPB_REG_IOSTATUS);
		sc->sc_iobase = (blr << PPB_IO_SHIFT) & PPB_IO_MASK;
		sc->sc_iolimit = (blr & PPB_IO_MASK) | 0x00000fff;
		blr = pci_conf_read(pc, sc->sc_tag, PPB_REG_IO_HI);
		sc->sc_iobase |= (blr & 0x0000ffff) << 16;
		sc->sc_iolimit |= (blr & 0xffff0000);
		/* Window unset or inverted: allocate one ourselves. */
		if (sc->sc_iolimit < sc->sc_iobase || sc->sc_iobase == 0) {
			start = max(PCI_IO_START, pa->pa_ioex->ex_start);
			end = min(PCI_IO_END, pa->pa_ioex->ex_end);
			/* Try progressively smaller power-of-two sizes. */
			for (size = 0x2000; size >= PPB_IO_MIN; size >>= 1)
				if (extent_alloc_subregion(pa->pa_ioex, start,
				    end, size, size, 0, 0, 0, &base) == 0)
					break;
			if (size >= PPB_IO_MIN) {
				sc->sc_iobase = base;
				sc->sc_iolimit = base + size - 1;
				blr = pci_conf_read(pc, sc->sc_tag,
				    PPB_REG_IOSTATUS);
				blr &= 0xffff0000;
				blr |= sc->sc_iolimit & PPB_IO_MASK;
				blr |= (sc->sc_iobase >> PPB_IO_SHIFT);
				pci_conf_write(pc, sc->sc_tag,
				    PPB_REG_IOSTATUS, blr);
				blr = (sc->sc_iobase & 0xffff0000) >> 16;
				blr |= sc->sc_iolimit & 0xffff0000;
				pci_conf_write(pc, sc->sc_tag,
				    PPB_REG_IO_HI, blr);
				csr |= PCI_COMMAND_IO_ENABLE;
			}
/* * Generic PCI bus enumeration routine. Used unless machine-dependent * code needs to provide something else. */ int pci_enumerate_bus(struct pci_softc *sc, const int *locators, int (*match)(const struct pci_attach_args *), struct pci_attach_args *pap) { pci_chipset_tag_t pc = sc->sc_pc; int device, function, nfunctions, ret; const struct pci_quirkdata *qd; pcireg_t id, bhlcr; pcitag_t tag; uint8_t devs[32]; int i, n; n = pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs, __arraycount(devs)); for (i = 0; i < n; i++) { device = devs[i]; if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) && (locators[PCICF_DEV] != device)) continue; tag = pci_make_tag(pc, sc->sc_bus, device, 0); bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG); if (PCI_HDRTYPE_TYPE(bhlcr) > 2) continue; id = pci_conf_read(pc, tag, PCI_ID_REG); /* Invalid vendor ID value? */ if (PCI_VENDOR(id) == PCI_VENDOR_INVALID) continue; /* XXX Not invalid, but we've done this ~forever. */ if (PCI_VENDOR(id) == 0) continue; qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id)); if (qd != NULL && (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0) nfunctions = 8; else if (qd != NULL && (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0) nfunctions = 1; else nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 
8 : 1; #ifdef __PCI_DEV_FUNCORDER char funcs[8]; int j; for (j = 0; j < nfunctions; j++) { funcs[j] = j; } if (j < __arraycount(funcs)) funcs[j] = -1; if (nfunctions > 1) { pci_dev_funcorder(sc->sc_pc, sc->sc_bus, device, nfunctions, funcs); } for (j = 0; j < 8 && (function = funcs[j]) < 8 && function >= 0; j++) { #else for (function = 0; function < nfunctions; function++) { #endif if ((locators[PCICF_FUNCTION] != PCICF_FUNCTION_DEFAULT) && (locators[PCICF_FUNCTION] != function)) continue; if (qd != NULL && (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0) continue; tag = pci_make_tag(pc, sc->sc_bus, device, function); ret = pci_probe_device(sc, tag, match, pap); if (match != NULL && ret != 0) return ret; } } return 0; } #endif /* PCI_MACHDEP_ENUMERATE_BUS */ /* * Vital Product Data (PCI 2.2) */ int pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count, pcireg_t *data) { uint32_t reg; int ofs, i, j; KASSERT(data != NULL); KASSERT((offset + count) < 0x7fff); if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, ®) == 0) return 1; for (i = 0; i < count; offset += sizeof(*data), i++) { reg &= 0x0000ffff; reg &= ~PCI_VPD_OPFLAG; reg |= PCI_VPD_ADDRESS(offset); pci_conf_write(pc, tag, ofs, reg); /* * PCI 2.2 does not specify how long we should poll * for completion nor whether the operation can fail. */ j = 0; do { if (j++ == 20) return 1; delay(4); reg = pci_conf_read(pc, tag, ofs); } while ((reg & PCI_VPD_OPFLAG) == 0); data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs)); } return 0; }
/*
 * Chip-mapping routine for the Silicon Image SiI3112 SATALink
 * controller: reset the chip via the SCS_CMD config register, map the
 * BA5 register space when enabled, set up DMA and capability bits,
 * and map both channels.
 */
static void
sii3112_chip_map(struct pciide_softc *sc, struct pci_attach_args *pa)
{
	struct pciide_channel *cp;
	bus_size_t cmdsize, ctlsize;
	pcireg_t interface, scs_cmd, cfgctl;
	int channel;

	if (pciide_chipen(sc, pa) == 0)
		return;

#define SII3112_RESET_BITS						\
	(SCS_CMD_PBM_RESET | SCS_CMD_ARB_RESET |			\
	 SCS_CMD_FF1_RESET | SCS_CMD_FF0_RESET |			\
	 SCS_CMD_IDE1_RESET | SCS_CMD_IDE0_RESET)

	/*
	 * Reset everything and then unblock all of the interrupts.
	 */
	scs_cmd = pci_conf_read(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD);
	pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
	    scs_cmd | SII3112_RESET_BITS);
	delay(50 * 1000);
	/* Drop the reset bits, preserving only the BA5-enable state. */
	pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_SCS_CMD,
	    scs_cmd & SCS_CMD_BA5_EN);
	delay(50 * 1000);

	if (scs_cmd & SCS_CMD_BA5_EN) {
		aprint_verbose("%s: SATALink BA5 register space enabled\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		/* Map BA5; on failure fall back to normal register access. */
		if (pci_mapreg_map(pa, PCI_MAPREG_START + 0x14,
		    PCI_MAPREG_TYPE_MEM|PCI_MAPREG_MEM_TYPE_32BIT, 0,
		    &sc->sc_ba5_st, &sc->sc_ba5_sh, NULL, NULL) != 0)
			aprint_error("%s: unable to map SATALink BA5 "
			    "register space\n",
			    sc->sc_wdcdev.sc_dev.dv_xname);
		else
			sc->sc_ba5_en = 1;
	} else {
		aprint_verbose("%s: SATALink BA5 register space disabled\n",
		    sc->sc_wdcdev.sc_dev.dv_xname);
		/*
		 * Set CFGCTL_BA5INDEN — presumably enables indirect BA5
		 * access through config space; confirm against datasheet.
		 */
		cfgctl = pci_conf_read(pa->pa_pc, pa->pa_tag,
		    SII3112_PCI_CFGCTL);
		pci_conf_write(pa->pa_pc, pa->pa_tag, SII3112_PCI_CFGCTL,
		    cfgctl | CFGCTL_BA5INDEN);
	}

	aprint_normal("%s: bus-master DMA support present",
	    sc->sc_wdcdev.sc_dev.dv_xname);
	pciide_mapreg_dma(sc, pa);
	aprint_normal("\n");

	/*
	 * Rev. <= 0x01 of the 3112 have a bug that can cause data
	 * corruption if DMA transfers cross an 8K boundary.  This is
	 * apparently hard to tickle, but we'll go ahead and play it
	 * safe.
	 */
	if (PCI_REVISION(pa->pa_class) <= 0x01) {
		sc->sc_dma_maxsegsz = 8192;
		sc->sc_dma_boundary = 8192;
	}

	sc->sc_wdcdev.cap |= WDC_CAPABILITY_DATA16 | WDC_CAPABILITY_DATA32 |
	    WDC_CAPABILITY_MODE;
	sc->sc_wdcdev.PIO_cap = 4;
	if (sc->sc_dma_ok) {
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_DMA | WDC_CAPABILITY_UDMA;
		sc->sc_wdcdev.cap |= WDC_CAPABILITY_IRQACK;
		sc->sc_wdcdev.irqack = pciide_irqack;
		sc->sc_wdcdev.DMA_cap = 2;
		sc->sc_wdcdev.UDMA_cap = 6;
	}
	sc->sc_wdcdev.set_modes = sii3112_setup_channel;

	/* We can use SControl and SStatus to probe for drives. */
	sc->sc_wdcdev.drv_probe = sii3112_drv_probe;

	sc->sc_wdcdev.channels = sc->wdc_chanarray;
	sc->sc_wdcdev.nchannels = PCIIDE_NUM_CHANNELS;

	/*
	 * The 3112 either identifies itself as a RAID storage device
	 * or a Misc storage device.  Fake up the interface bits for
	 * what our driver expects.
	 */
	if (PCI_SUBCLASS(pa->pa_class) == PCI_SUBCLASS_MASS_STORAGE_IDE) {
		interface = PCI_INTERFACE(pa->pa_class);
	} else {
		interface = PCIIDE_INTERFACE_BUS_MASTER_DMA |
		    PCIIDE_INTERFACE_PCI(0) | PCIIDE_INTERFACE_PCI(1);
	}

	for (channel = 0; channel < sc->sc_wdcdev.nchannels; channel++) {
		cp = &sc->pciide_channels[channel];
		if (pciide_chansetup(sc, channel, interface) == 0)
			continue;
		pciide_mapchan(pa, cp, interface, &cmdsize, &ctlsize,
		    pciide_pci_intr);
	}
}