int puc_cardbus_match(struct device *parent, void *match, void *aux) { struct cardbus_attach_args *ca = aux; struct cardbus_devfunc *ct = ca->ca_ct; cardbus_chipset_tag_t cc = ct->ct_cc; cardbus_function_tag_t cf = ct->ct_cf; cardbusreg_t bhlc, reg; bhlc = cardbus_conf_read(cc, cf, ca->ca_tag, CARDBUS_BHLC_REG); if (PCI_HDRTYPE_TYPE(bhlc) != 0) return(0); /* this one is some sort of a bridge and not a puc */ if (PCI_VENDOR(ca->ca_id) == PCI_VENDOR_OXFORD2 && PCI_PRODUCT(ca->ca_id) == PCI_PRODUCT_OXFORD2_EXSYS_EX41098) return (0); reg = cardbus_conf_read(cc, cf, ca->ca_tag, PCI_SUBSYS_ID_REG); if (puc_find_description(PCI_VENDOR(ca->ca_id), PCI_PRODUCT(ca->ca_id), PCI_VENDOR(reg), PCI_PRODUCT(reg))) return (10); return (0); }
/*
 * Walk a device's PCI capability list looking for capability `capid'.
 * On a hit, optionally hand back the config-space offset of the
 * capability (via `offset') and the raw capability register (via
 * `value').  Returns 1 when found, 0 otherwise (including when the
 * device advertises no capability list, or the list is corrupt).
 */
int
pci_get_capability(pci_chipset_tag_t pc, pcitag_t tag, int capid,
    int *offset, pcireg_t *value)
{
	pcireg_t data;
	unsigned int ptr;

	/* Bail out early if the device has no capability list at all. */
	data = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);
	if ((data & PCI_STATUS_CAPLIST_SUPPORT) == 0)
		return 0;

	/* The location of the list-head pointer depends on header type. */
	data = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(data) == 0 || PCI_HDRTYPE_TYPE(data) == 1)
		ptr = PCI_CAPLISTPTR_REG;	/* device / PCI-PCI bridge */
	else if (PCI_HDRTYPE_TYPE(data) == 2)
		ptr = PCI_CARDBUS_CAPLISTPTR_REG; /* PCI-CardBus bridge */
	else
		return 0;

	ptr = PCI_CAPLIST_PTR(pci_conf_read(pc, tag, ptr));
	for (; ptr != 0; ptr = PCI_CAPLIST_NEXT(data)) {
		/*
		 * Entries must be dword-aligned and cannot live inside
		 * the standard header; anything else means the list is
		 * broken, so give up rather than loop on garbage.
		 */
		if ((ptr & 3) || (ptr < 0x40)) {
			int bus, device, function;

			pci_decompose_tag(pc, tag, &bus, &device, &function);
			printf("Skipping broken PCI header on %d:%d:%d\n",
			    bus, device, function);
			return 0;
		}
		data = pci_conf_read(pc, tag, ptr);
		if (PCI_CAPLIST_CAP(data) == capid) {
			if (offset)
				*offset = ptr;
			if (value)
				*value = data;
			return 1;
		}
	}

	return 0;
}
static int pci_scan_bus(struct pci_bus *bus) { int totaldev = 0; struct pci_func df; memset(&df, 0, sizeof(df)); df.bus = bus; for (df.dev = 0; df.dev < 32; df.dev++) { uint32_t bhlc = pci_conf_read(&df, PCI_BHLC_REG); struct pci_func f; if (PCI_HDRTYPE_TYPE(bhlc) > 1) /* Unsupported or no device */ continue; /* found a device */ totaldev++; f = df; for (f.func = 0; f.func < (PCI_HDRTYPE_MULTIFN(bhlc) ? 8 : 1); f.func++) { struct pci_func *af; uint32_t dev_id; uint32_t intr; dev_id = pci_conf_read(&f, PCI_ID_REG); if (PCI_VENDOR(dev_id) == 0xffff) continue; /* found a function */ af = kmem_alloc(sizeof(*af)); *af = f; list_init(&af->link); list_insert(&pci_func_list, &af->link); af->dev_id = dev_id; intr = pci_conf_read(af, PCI_INTERRUPT_REG); af->irq_line = PCI_INTERRUPT_LINE(intr); af->dev_class = pci_conf_read(af, PCI_CLASS_REG); #ifdef SHOW_PCI_VERBOSE_INFO pci_print_func(af); #endif } } return totaldev; }
int puc_pci_match(struct device *parent, void *match, void *aux) { struct pci_attach_args *pa = aux; const struct puc_device_description *desc; pcireg_t bhlc, subsys; bhlc = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG); if (PCI_HDRTYPE_TYPE(bhlc) != 0) return (0); subsys = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG); desc = puc_find_description(PCI_VENDOR(pa->pa_id), PCI_PRODUCT(pa->pa_id), PCI_VENDOR(subsys), PCI_PRODUCT(subsys)); if (desc != NULL) return (1); return (0); }
/*
 * autoconf match for puc(4): accept type 0 header devices whose
 * subsystem vendor/product IDs appear in the puc description table.
 * Returns 10 on a table hit to outbid generic class matches.
 */
static int
puc_match(device_t parent, cfdata_t match, void *aux)
{
	struct pci_attach_args *pa = aux;
	pcireg_t hdr, ssid;

	/* Only plain (type 0) headers can be a puc. */
	hdr = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(hdr) != 0)
		return (0);

	ssid = pci_conf_read(pa->pa_pc, pa->pa_tag, PCI_SUBSYS_ID_REG);
	if (puc_find_description(PCI_VENDOR(pa->pa_id),
	    PCI_PRODUCT(pa->pa_id), PCI_VENDOR(ssid),
	    PCI_PRODUCT(ssid)) != NULL)
		return (10);

#if 0
	/*
	 * XXX this is obviously bogus. eventually, we might want
	 * XXX to match communications/modem, etc., but that needs some
	 * XXX special work in the match fn.
	 */

	/*
	 * Match class/subclass, so we can tell people to compile kernel
	 * with options that cause this driver to spew.
	 */
	if (PCI_CLASS(pa->pa_class) == PCI_CLASS_COMMUNICATIONS) {
		switch (PCI_SUBCLASS(pa->pa_class)) {
		case PCI_SUBCLASS_COMMUNICATIONS_SERIAL:
		case PCI_SUBCLASS_COMMUNICATIONS_MODEM:
			return (1);
		}
	}
#endif

	return (0);
}
int puc_cardbus_match(struct device *parent, void *match, void *aux) { struct cardbus_attach_args *ca = aux; pci_chipset_tag_t pc = ca->ca_pc; pcireg_t bhlc, reg; bhlc = pci_conf_read(pc, ca->ca_tag, PCI_BHLC_REG); if (PCI_HDRTYPE_TYPE(bhlc) != 0) return(0); /* this one is some sort of a bridge and not a puc */ if (PCI_VENDOR(ca->ca_id) == PCI_VENDOR_OXFORD2 && PCI_PRODUCT(ca->ca_id) == PCI_PRODUCT_OXFORD2_EXSYS_EX41098) return (0); reg = pci_conf_read(pc, ca->ca_tag, PCI_SUBSYS_ID_REG); if (puc_find_description(PCI_VENDOR(ca->ca_id), PCI_PRODUCT(ca->ca_id), PCI_VENDOR(reg), PCI_PRODUCT(reg))) return (10); return (0); }
static int pci_device_openbsd_probe(struct pci_device *device) { struct pci_device_private *priv = (struct pci_device_private *)device; struct pci_mem_region *region; uint64_t reg64, size64; uint32_t bar, reg, size; int domain, bus, dev, func, err; domain = device->domain; bus = device->bus; dev = device->dev; func = device->func; err = pci_read(domain, bus, dev, func, PCI_BHLC_REG, ®); if (err) return err; priv->header_type = PCI_HDRTYPE_TYPE(reg); if (priv->header_type != 0) return 0; region = device->regions; for (bar = PCI_MAPREG_START; bar < PCI_MAPREG_END; bar += sizeof(uint32_t), region++) { err = pci_read(domain, bus, dev, func, bar, ®); if (err) return err; /* Probe the size of the region. */ err = pci_readmask(domain, bus, dev, func, bar, &size); if (err) return err; if (PCI_MAPREG_TYPE(reg) == PCI_MAPREG_TYPE_IO) { region->is_IO = 1; region->base_addr = PCI_MAPREG_IO_ADDR(reg); region->size = PCI_MAPREG_IO_SIZE(size); } else { if (PCI_MAPREG_MEM_PREFETCHABLE(reg)) region->is_prefetchable = 1; switch(PCI_MAPREG_MEM_TYPE(reg)) { case PCI_MAPREG_MEM_TYPE_32BIT: case PCI_MAPREG_MEM_TYPE_32BIT_1M: region->base_addr = PCI_MAPREG_MEM_ADDR(reg); region->size = PCI_MAPREG_MEM_SIZE(size); break; case PCI_MAPREG_MEM_TYPE_64BIT: region->is_64 = 1; reg64 = reg; size64 = size; bar += sizeof(uint32_t); err = pci_read(domain, bus, dev, func, bar, ®); if (err) return err; reg64 |= (uint64_t)reg << 32; err = pci_readmask(domain, bus, dev, func, bar, &size); if (err) return err; size64 |= (uint64_t)size << 32; region->base_addr = PCI_MAPREG_MEM64_ADDR(reg64); region->size = PCI_MAPREG_MEM64_SIZE(size64); region++; break; } } } /* Probe expansion ROM if present */ err = pci_read(domain, bus, dev, func, PCI_ROM_REG, ®); if (err) return err; if (reg != 0) { err = pci_write(domain, bus, dev, func, PCI_ROM_REG, ~PCI_ROM_ENABLE); if (err) return err; pci_read(domain, bus, dev, func, PCI_ROM_REG, &size); pci_write(domain, bus, dev, func, PCI_ROM_REG, reg); if (PCI_ROM_ADDR(reg) 
!= 0) { priv->rom_base = PCI_ROM_ADDR(reg); device->rom_size = PCI_ROM_SIZE(size); } } return 0; }
/*
 * Walk every BAR of the device at `tag' and invoke `func' once per
 * implemented register so the callback can reserve or allocate the
 * address space it needs.  The BAR range to scan depends on the
 * header type; unknown header types are counted as bogus and skipped.
 *
 * BAR sizing uses the standard write-all-ones / read-mask / restore
 * sequence, so the statement order in the loop below is critical.
 */
void pciaddr_resource_manage(struct pcibios_softc *sc, pci_chipset_tag_t pc,
    pcitag_t tag, pciaddr_resource_manage_func_t func)
{
	struct extent *ex;
	pcireg_t val, mask;
	bus_addr_t addr;
	bus_size_t size;
	int error, mapreg, type, reg_start, reg_end, width;

	val = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(val)) {
	default:
		printf("WARNING: unknown PCI device header 0x%x.\n",
		    PCI_HDRTYPE_TYPE(val));
		sc->nbogus++;
		return;
	case 0:		/* standard device: 6 BARs */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		break;
	case 1:		/* PCI-PCI bridge: 2 BARs */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		break;
	case 2:		/* PCI-CardBus bridge: 1 BAR */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		break;
	}
	error = 0;
	for (mapreg = reg_start; mapreg < reg_end; mapreg += width) {
		/* inquire PCI device bus space requirement */
		val = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, ~0);
		mask = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, val);
		type = PCI_MAPREG_TYPE(val);
		width = 4;
		if (type == PCI_MAPREG_TYPE_MEM) {
			if (PCI_MAPREG_MEM_TYPE(val) ==
			    PCI_MAPREG_MEM_TYPE_64BIT) {
				/* XXX We could examine the upper 32 bits
				 * XXX of the BAR here, but we are totally
				 * XXX unprepared to handle a non-zero value,
				 * XXX either here or anywhere else in
				 * XXX i386-land.
				 * XXX So just arrange to not look at the
				 * XXX upper 32 bits, lest we misinterpret
				 * XXX it as a 32-bit BAR set to zero.
				 */
				width = 8;
			}
			addr = PCI_MAPREG_MEM_ADDR(val);
			size = PCI_MAPREG_MEM_SIZE(mask);
			ex = sc->extent_mem;
		} else {
			/* XXX some devices give 32bit value */
			addr = PCI_MAPREG_IO_ADDR(val) & PCIADDR_PORT_END;
			size = PCI_MAPREG_IO_SIZE(mask);
			ex = sc->extent_port;
		}
		if (!size) /* unused register */
			continue;
		/* reservation/allocation phase */
		error += (*func) (sc, pc, tag, mapreg, ex, type, &addr, size);
		PCIBIOS_PRINTV(("\t%02xh %s 0x%08x 0x%08x\n",
		    mapreg, type ? "port" : "mem ",
		    (unsigned int)addr, (unsigned int)size));
	}
	if (error)
		sc->nbogus++;
	PCIBIOS_PRINTV(("\t\t[%s]\n", error ? "NG" : "OK"));
}
/*
 * We can't use the generic pci_enumerate_bus, because the hypervisor
 * may hide the function 0 from us, while other functions are
 * available
 */
int
xen_pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;

	for (device = 0; device < sc->sc_maxndevs; device++) {
		/* Honour a wildcarded or matching device locator only. */
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		id = pci_conf_read(pc, tag, PCI_ID_REG);

		qd = NULL;
		if (PCI_VENDOR(id) != PCI_VENDOR_INVALID) {
			/* XXX Not invalid, but we've done this ~forever. */
			if (PCI_VENDOR(id) == 0)
				continue;
			if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
				continue;

			/* Quirk table may override the function count. */
			qd = pci_lookup_quirkdata(PCI_VENDOR(id),
			    PCI_PRODUCT(id));

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
				nfunctions = 8;
			else if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
				nfunctions = 1;
			else
				nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;
		} else {
			/*
			 * Vendor ID invalid. This may be because there's no
			 * device, or because the hypervisor is hiding
			 * function 0 from us. Try to probe other functions
			 * anyway.
			 */
			nfunctions = 8;
		}

		for (function = 0; function < nfunctions; function++) {
			if ((locators[PCICF_FUNCTION] !=
			    PCICF_FUNCTION_DEFAULT) &&
			    (locators[PCICF_FUNCTION] != function))
				continue;
			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			/* In match mode the first hit terminates the scan. */
			if (match != NULL && ret != 0)
				return (ret);
		}
	}
	return (0);
}
/*
 * Generic PCI bus enumeration routine.  Used unless machine-dependent
 * code needs to provide something else.
 */
int
pci_enumerate_bus(struct pci_softc *sc, const int *locators,
    int (*match)(const struct pci_attach_args *), struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	int device, function, nfunctions, ret;
	const struct pci_quirkdata *qd;
	pcireg_t id, bhlcr;
	pcitag_t tag;
	uint8_t devs[32];
	int i, n;

	/* MD code may impose a preferred probing order for device slots. */
	n = pci_bus_devorder(sc->sc_pc, sc->sc_bus, devs, __arraycount(devs));
	for (i = 0; i < n; i++) {
		device = devs[i];

		/* Honour a wildcarded or matching device locator only. */
		if ((locators[PCICF_DEV] != PCICF_DEV_DEFAULT) &&
		    (locators[PCICF_DEV] != device))
			continue;

		tag = pci_make_tag(pc, sc->sc_bus, device, 0);

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
			continue;

		id = pci_conf_read(pc, tag, PCI_ID_REG);

		/* Invalid vendor ID value? */
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
			continue;
		/* XXX Not invalid, but we've done this ~forever. */
		if (PCI_VENDOR(id) == 0)
			continue;

		/* Quirk table may override the function count. */
		qd = pci_lookup_quirkdata(PCI_VENDOR(id), PCI_PRODUCT(id));

		if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MULTIFUNCTION) != 0)
			nfunctions = 8;
		else if (qd != NULL &&
		    (qd->quirks & PCI_QUIRK_MONOFUNCTION) != 0)
			nfunctions = 1;
		else
			nfunctions = PCI_HDRTYPE_MULTIFN(bhlcr) ? 8 : 1;

#ifdef __PCI_DEV_FUNCORDER
		/* MD code may also reorder the functions within a device. */
		char funcs[8];
		int j;
		for (j = 0; j < nfunctions; j++) {
			funcs[j] = j;
		}
		if (j < __arraycount(funcs))
			funcs[j] = -1;
		if (nfunctions > 1) {
			pci_dev_funcorder(sc->sc_pc, sc->sc_bus, device,
			    nfunctions, funcs);
		}
		for (j = 0; j < 8 && (function = funcs[j]) < 8 &&
		    function >= 0; j++) {
#else
		for (function = 0; function < nfunctions; function++) {
#endif
			if ((locators[PCICF_FUNCTION] !=
			    PCICF_FUNCTION_DEFAULT) &&
			    (locators[PCICF_FUNCTION] != function))
				continue;

			if (qd != NULL &&
			    (qd->quirks & PCI_QUIRK_SKIP_FUNC(function)) != 0)
				continue;
			tag = pci_make_tag(pc, sc->sc_bus, device, function);
			ret = pci_probe_device(sc, tag, match, pap);
			/* In match mode the first hit terminates the scan. */
			if (match != NULL && ret != 0)
				return ret;
		}
	}
	return 0;
}
#endif /* PCI_MACHDEP_ENUMERATE_BUS */


/*
 * Vital Product Data (PCI 2.2)
 */

/*
 * Read `count' 32-bit words of VPD starting at `offset' into `data',
 * using the VPD capability's address/data register pair.  Returns 0
 * on success, 1 if the device lacks the capability or an operation
 * does not complete within the polling budget.
 */
int
pci_vpd_read(pci_chipset_tag_t pc, pcitag_t tag, int offset, int count,
    pcireg_t *data)
{
	uint32_t reg;
	int ofs, i, j;

	KASSERT(data != NULL);
	KASSERT((offset + count) < 0x7fff);

	if (pci_get_capability(pc, tag, PCI_CAP_VPD, &ofs, &reg) == 0)
		return 1;

	for (i = 0; i < count; offset += sizeof(*data), i++) {
		/* Load the word address, with the operation flag clear
		 * (clear = read request; set by hardware on completion). */
		reg &= 0x0000ffff;
		reg &= ~PCI_VPD_OPFLAG;
		reg |= PCI_VPD_ADDRESS(offset);
		pci_conf_write(pc, tag, ofs, reg);

		/*
		 * PCI 2.2 does not specify how long we should poll
		 * for completion nor whether the operation can fail.
		 */
		j = 0;
		do {
			if (j++ == 20)
				return 1;
			delay(4);
			reg = pci_conf_read(pc, tag, ofs);
		} while ((reg & PCI_VPD_OPFLAG) == 0);
		data[i] = pci_conf_read(pc, tag, PCI_VPD_DATAREG(ofs));
	}

	return 0;
}
/*
 * Probe (and, when `match' is NULL, attach) the single PCI function
 * identified by `tag'.  Builds a struct pci_attach_args describing
 * the function — ID/class registers, bus-space/DMA tags, swizzled
 * interrupt pin, command flags — then either runs the caller's match
 * callback or hands the function to autoconfiguration.
 *
 * Returns non-zero if the match callback accepted the device (its
 * result) or, in attach mode, if a driver attached.
 */
int
pci_probe_device(struct pci_softc *sc, pcitag_t tag,
    int (*match)(const struct pci_attach_args *),
    struct pci_attach_args *pap)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	struct pci_attach_args pa;
	pcireg_t id, /* csr, */ pciclass, intr, bhlcr, bar, endbar;
#ifdef __HAVE_PCI_MSI_MSIX
	pcireg_t cap;
	int off;
#endif
	int ret, pin, bus, device, function, i, width;
	int locs[PCICF_NLOCS];

	pci_decompose_tag(pc, tag, &bus, &device, &function);

	/* a driver already attached? */
	if (sc->PCI_SC_DEVICESC(device, function).c_dev != NULL && !match)
		return 0;

	bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
	if (PCI_HDRTYPE_TYPE(bhlcr) > 2)
		return 0;

	id = pci_conf_read(pc, tag, PCI_ID_REG);
	/* csr = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG); */
	pciclass = pci_conf_read(pc, tag, PCI_CLASS_REG);

	/* Invalid vendor ID value? */
	if (PCI_VENDOR(id) == PCI_VENDOR_INVALID)
		return 0;
	/* XXX Not invalid, but we've done this ~forever. */
	if (PCI_VENDOR(id) == 0)
		return 0;

	/* Collect memory range info */
	memset(sc->PCI_SC_DEVICESC(device, function).c_range, 0,
	    sizeof(sc->PCI_SC_DEVICESC(device, function).c_range));
	i = 0;
	/* The number of BARs to scan depends on the header type. */
	switch (PCI_HDRTYPE_TYPE(bhlcr)) {
	case PCI_HDRTYPE_PPB:
		endbar = PCI_MAPREG_PPB_END;
		break;
	case PCI_HDRTYPE_PCB:
		endbar = PCI_MAPREG_PCB_END;
		break;
	default:
		endbar = PCI_MAPREG_END;
		break;
	}
	for (bar = PCI_MAPREG_START; bar < endbar; bar += width) {
		struct pci_range *r;
		pcireg_t type;

		width = 4;
		if (pci_mapreg_probe(pc, tag, bar, &type) == 0)
			continue;

		if (PCI_MAPREG_TYPE(type) == PCI_MAPREG_TYPE_MEM) {
			/* 64-bit BARs consume two register slots. */
			if (PCI_MAPREG_MEM_TYPE(type) ==
			    PCI_MAPREG_MEM_TYPE_64BIT)
				width = 8;

			r = &sc->PCI_SC_DEVICESC(device, function).c_range[i++];
			if (pci_mapreg_info(pc, tag, bar, type,
			    &r->r_offset, &r->r_size, &r->r_flags) != 0)
				break;
			if ((PCI_VENDOR(id) == PCI_VENDOR_ATI) && (bar == 0x10)
			    && (r->r_size == 0x1000000)) {
				struct pci_range *nr;
				/*
				 * this has to be a mach64
				 * split things up so each half-aperture can
				 * be mapped PREFETCHABLE except the last page
				 * which may contain registers
				 */
				r->r_size = 0x7ff000;
				r->r_flags = BUS_SPACE_MAP_LINEAR |
				    BUS_SPACE_MAP_PREFETCHABLE;
				nr = &sc->PCI_SC_DEVICESC(device,
				    function).c_range[i++];
				nr->r_offset = r->r_offset + 0x800000;
				nr->r_size = 0x7ff000;
				nr->r_flags = BUS_SPACE_MAP_LINEAR |
				    BUS_SPACE_MAP_PREFETCHABLE;
			}
		}
	}

	/* Fill in the attach arguments handed to match/attach code. */
	pa.pa_iot = sc->sc_iot;
	pa.pa_memt = sc->sc_memt;
	pa.pa_dmat = sc->sc_dmat;
	pa.pa_dmat64 = sc->sc_dmat64;
	pa.pa_pc = pc;
	pa.pa_bus = bus;
	pa.pa_device = device;
	pa.pa_function = function;
	pa.pa_tag = tag;
	pa.pa_id = id;
	pa.pa_class = pciclass;

	/*
	 * Set up memory, I/O enable, and PCI command flags
	 * as appropriate.
	 */
	pa.pa_flags = sc->sc_flags;

	/*
	 * If the cache line size is not configured, then
	 * clear the MRL/MRM/MWI command-ok flags.
	 */
	if (PCI_CACHELINE(bhlcr) == 0) {
		pa.pa_flags &= ~(PCI_FLAGS_MRL_OKAY|
		    PCI_FLAGS_MRM_OKAY|PCI_FLAGS_MWI_OKAY);
	}

	if (sc->sc_bridgetag == NULL) {
		pa.pa_intrswiz = 0;
		pa.pa_intrtag = tag;
	} else {
		pa.pa_intrswiz = sc->sc_intrswiz + device;
		pa.pa_intrtag = sc->sc_intrtag;
	}

	intr = pci_conf_read(pc, tag, PCI_INTERRUPT_REG);

	pin = PCI_INTERRUPT_PIN(intr);
	pa.pa_rawintrpin = pin;
	if (pin == PCI_INTERRUPT_PIN_NONE) {
		/* no interrupt */
		pa.pa_intrpin = 0;
	} else {
		/*
		 * swizzle it based on the number of busses we're
		 * behind and our device number.
		 */
		pa.pa_intrpin =			/* XXX */
		    ((pin + pa.pa_intrswiz - 1) % 4) + 1;
	}
	pa.pa_intrline = PCI_INTERRUPT_LINE(intr);

#ifdef __HAVE_PCI_MSI_MSIX
	if (pci_get_ht_capability(pc, tag, PCI_HT_CAP_MSIMAP, &off, &cap)) {
		/*
		 * XXX Should we enable MSI mapping ourselves on
		 * systems that have it disabled?
		 */
		if (cap & PCI_HT_MSI_ENABLED) {
			uint64_t addr;
			if ((cap & PCI_HT_MSI_FIXED) == 0) {
				addr = pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_LO);
				addr |= (uint64_t)pci_conf_read(pc, tag,
				    off + PCI_HT_MSI_ADDR_HI) << 32;
			} else
				addr = PCI_HT_MSI_FIXED_ADDR;

			/*
			 * XXX This will fail to enable MSI on systems
			 * that don't use the canonical address.
			 */
			if (addr == PCI_HT_MSI_FIXED_ADDR) {
				pa.pa_flags |= PCI_FLAGS_MSI_OKAY;
				pa.pa_flags |= PCI_FLAGS_MSIX_OKAY;
			}
		}
	}
#endif

	if (match != NULL) {
		ret = (*match)(&pa);
		if (ret != 0 && pap != NULL)
			*pap = pa;
	} else {
		struct pci_child *c;
		locs[PCICF_DEV] = device;
		locs[PCICF_FUNCTION] = function;

		c = &sc->PCI_SC_DEVICESC(device, function);
		/* Snapshot config space and power state before attach. */
		pci_conf_capture(pc, tag, &c->c_conf);
		if (pci_get_powerstate(pc, tag, &c->c_powerstate) == 0)
			c->c_psok = true;
		else
			c->c_psok = false;

		c->c_dev = config_found_sm_loc(sc->sc_dev, "pci", locs, &pa,
		    pciprint, config_stdsubmatch);

		ret = (c->c_dev != NULL);
	}

	return ret;
}
/*
 * Allocate address space behind a PCI-PCI bridge that firmware left
 * unconfigured.  First counts the I/O and memory BARs (and expansion
 * ROMs) of all devices on the secondary bus; if any exist and the
 * bridge windows are invalid, carves ranges out of the parent's
 * extent maps and programs the bridge window registers.
 *
 * NOTE(review): this chunk appears truncated — the function body
 * visibly continues past the end of the visible text.
 */
void ppb_alloc_resources(struct ppb_softc *sc, struct pci_attach_args *pa)
{
	pci_chipset_tag_t pc = sc->sc_pc;
	pcireg_t id, busdata, blr, bhlcr, type, csr;
	pcireg_t addr, mask;
	pcitag_t tag;
	int bus, dev;
	int reg, reg_start, reg_end, reg_rom;
	int io_count = 0;
	int mem_count = 0;
	bus_addr_t start, end;
	u_long base, size;

	if (pa->pa_memex == NULL)
		return;

	busdata = pci_conf_read(pc, sc->sc_tag, PPB_REG_BUSINFO);
	bus = PPB_BUSINFO_SECONDARY(busdata);
	if (bus == 0)
		return;

	/*
	 * Count number of devices. If there are no devices behind
	 * this bridge, there's no point in allocating any address
	 * space.
	 */
	for (dev = 0; dev < pci_bus_maxdevs(pc, bus); dev++) {
		tag = pci_make_tag(pc, bus, dev, 0);
		id = pci_conf_read(pc, tag, PCI_ID_REG);
		if (PCI_VENDOR(id) == PCI_VENDOR_INVALID || PCI_VENDOR(id) == 0)
			continue;

		bhlcr = pci_conf_read(pc, tag, PCI_BHLC_REG);
		switch (PCI_HDRTYPE_TYPE(bhlcr)) {
		case 0:
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_END;
			reg_rom = PCI_ROM_REG;
			break;
		case 1:	/* PCI-PCI bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PPB_END;
			reg_rom = 0;	/* 0x38 */
			break;
		case 2:	/* PCI-Cardbus bridge */
			reg_start = PCI_MAPREG_START;
			reg_end = PCI_MAPREG_PCB_END;
			reg_rom = 0;
			break;
		default:
			return;
		}

		/* Tally implemented I/O and memory BARs. */
		for (reg = reg_start; reg < reg_end; reg += 4) {
			if (pci_mapreg_probe(pc, tag, reg, &type) == 0)
				continue;

			if (type == PCI_MAPREG_TYPE_IO)
				io_count++;
			else
				mem_count++;
		}

		/* An implemented expansion ROM needs memory space too. */
		if (reg_rom != 0) {
			addr = pci_conf_read(pc, tag, reg_rom);
			pci_conf_write(pc, tag, reg_rom, ~PCI_ROM_ENABLE);
			mask = pci_conf_read(pc, tag, reg_rom);
			pci_conf_write(pc, tag, reg_rom, addr);
			if (PCI_ROM_SIZE(mask))
				mem_count++;
		}
	}

	csr = pci_conf_read(pc, sc->sc_tag, PCI_COMMAND_STATUS_REG);

	/*
	 * Get the bridge in a consistent state. If memory mapped I/O
	 * is disabled, disabled the associated windows as well.
	 */
	if ((csr & PCI_COMMAND_MEM_ENABLE) == 0) {
		pci_conf_write(pc, sc->sc_tag, PPB_REG_MEM, 0x0000ffff);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFMEM, 0x0000ffff);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFBASE_HI32, 0);
		pci_conf_write(pc, sc->sc_tag, PPB_REG_PREFLIM_HI32, 0);
	}

	/* Allocate I/O address space if necessary. */
	if (io_count > 0 && pa->pa_ioex) {
		/* Decode the current I/O window base/limit. */
		blr = pci_conf_read(pc, sc->sc_tag, PPB_REG_IOSTATUS);
		sc->sc_iobase = (blr << PPB_IO_SHIFT) & PPB_IO_MASK;
		sc->sc_iolimit = (blr & PPB_IO_MASK) | 0x00000fff;
		blr = pci_conf_read(pc, sc->sc_tag, PPB_REG_IO_HI);
		sc->sc_iobase |= (blr & 0x0000ffff) << 16;
		sc->sc_iolimit |= (blr & 0xffff0000);
		/* An empty or unset window must be (re)allocated. */
		if (sc->sc_iolimit < sc->sc_iobase || sc->sc_iobase == 0) {
			start = max(PCI_IO_START, pa->pa_ioex->ex_start);
			end = min(PCI_IO_END, pa->pa_ioex->ex_end);
			/* Try progressively smaller naturally-aligned sizes. */
			for (size = 0x2000; size >= PPB_IO_MIN; size >>= 1)
				if (extent_alloc_subregion(pa->pa_ioex, start,
				    end, size, size, 0, 0, 0, &base) == 0)
					break;
			if (size >= PPB_IO_MIN) {
				sc->sc_iobase = base;
				sc->sc_iolimit = base + size - 1;
				blr = pci_conf_read(pc, sc->sc_tag,
				    PPB_REG_IOSTATUS);
				blr &= 0xffff0000;
				blr |= sc->sc_iolimit & PPB_IO_MASK;
				blr |= (sc->sc_iobase >> PPB_IO_SHIFT);
				pci_conf_write(pc, sc->sc_tag,
				    PPB_REG_IOSTATUS, blr);
				blr = (sc->sc_iobase & 0xffff0000) >> 16;
				blr |= sc->sc_iolimit & 0xffff0000;
				pci_conf_write(pc, sc->sc_tag,
				    PPB_REG_IO_HI, blr);
				csr |= PCI_COMMAND_IO_ENABLE;
			}
/*
 * Walk every BAR of the device at `tag' and invoke `func' once per
 * implemented register so the callback can reserve or allocate the
 * required address space.  Afterwards, enable I/O, memory and
 * bus-mastering in the command register if everything succeeded, or
 * disable them (and count the device as bogus) on any failure.
 *
 * BAR sizing uses the standard write-all-ones / read-mask / restore
 * sequence, so the statement order in the loop below is critical.
 */
void pciaddr_resource_manage(pci_chipset_tag_t pc, pcitag_t tag,
    pciaddr_resource_manage_func_t func, void *ctx)
{
	pcireg_t val, mask;
	bus_addr_t addr;
	bus_size_t size;
	int error, useport, usemem, mapreg, type, reg_start, reg_end, width;

	val = pci_conf_read(pc, tag, PCI_BHLC_REG);
	switch (PCI_HDRTYPE_TYPE(val)) {
	default:
		aprint_error("WARNING: unknown PCI device header.");
		pciaddr.nbogus++;
		return;
	case PCI_HDRTYPE_DEVICE:
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_END;
		break;
	case PCI_HDRTYPE_PPB: /* PCI-PCI bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PPB_END;
		break;
	case PCI_HDRTYPE_PCB: /* PCI-CardBus bridge */
		reg_start = PCI_MAPREG_START;
		reg_end = PCI_MAPREG_PCB_END;
		break;
	}
	error = useport = usemem = 0;

	for (mapreg = reg_start; mapreg < reg_end; mapreg += width) {
		/* inquire PCI device bus space requirement */
		val = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, ~0);
		mask = pci_conf_read(pc, tag, mapreg);
		pci_conf_write(pc, tag, mapreg, val);
		type = PCI_MAPREG_TYPE(val);
		width = 4;
		if (type == PCI_MAPREG_TYPE_MEM) {
			if (PCI_MAPREG_MEM_TYPE(val) ==
			    PCI_MAPREG_MEM_TYPE_64BIT) {
				/* XXX We could examine the upper 32 bits
				 * XXX of the BAR here, but we are totally
				 * XXX unprepared to handle a non-zero value,
				 * XXX either here or anywhere else in
				 * XXX i386-land.
				 * XXX So just arrange to not look at the
				 * XXX upper 32 bits, lest we misinterpret
				 * XXX it as a 32-bit BAR set to zero.
				 */
				width = 8;
			}
			size = PCI_MAPREG_MEM_SIZE(mask);
		} else {
			size = PCI_MAPREG_IO_SIZE(mask);
		}
		addr = pciaddr_ioaddr(val);

		if (size == 0) /* unused register */
			continue;

		if (type == PCI_MAPREG_TYPE_MEM)
			++usemem;
		else
			++useport;

		/* reservation/allocation phase */
		error += (*func) (pc, tag, mapreg, ctx, type, &addr, size);

		aprint_debug("\n\t%02xh %s 0x%08x 0x%08x",
		    mapreg, type ? "port" : "mem ",
		    (unsigned int)addr, (unsigned int)size);
	}

	/* enable/disable PCI device */
	val = pci_conf_read(pc, tag, PCI_COMMAND_STATUS_REG);

	if (error == 0)
		val |= (PCI_COMMAND_IO_ENABLE |
		    PCI_COMMAND_MEM_ENABLE |
		    PCI_COMMAND_MASTER_ENABLE);
	else
		val &= ~(PCI_COMMAND_IO_ENABLE |
		    PCI_COMMAND_MEM_ENABLE |
		    PCI_COMMAND_MASTER_ENABLE);

	pci_conf_write(pc, tag, PCI_COMMAND_STATUS_REG, val);

	if (error != 0)
		pciaddr.nbogus++;

	aprint_debug("\n\t\t[%s]\n", error ? "NG" : "OK");
}