static int
ebus_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{
	struct ebus_softc *sc;
	struct ebus_rinfo *eri;
	bus_space_tag_t bt;
	bus_space_handle_t bh;
	int i, rv;

	sc = device_get_softc(bus);
	if ((sc->sc_flags & EBUS_PCI) != 0 && type == SYS_RES_MEMORY) {
		for (i = 0; i < sc->sc_nrange; i++) {
			eri = &sc->sc_rinfo[i];
			if (rman_is_region_manager(res, &eri->eri_rman) != 0) {
				/*
				 * Carve the region out of the parent PCI
				 * resource backing this range and activate
				 * it via rman.
				 */
				bt = rman_get_bustag(eri->eri_res);
				rv = bus_space_subregion(bt,
				    rman_get_bushandle(eri->eri_res),
				    rman_get_start(res) -
				    rman_get_start(eri->eri_res),
				    rman_get_size(res), &bh);
				if (rv != 0)
					return (rv);
				rman_set_bustag(res, bt);
				rman_set_bushandle(res, bh);
				return (rman_activate_resource(res));
			}
		}
		return (EINVAL);
	}
	return (bus_generic_activate_resource(bus, child, type, rid, res));
}
int
ofw_pci_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	struct ofw_pci_softc *sc;
	struct bus_space_tag *tag;

	sc = device_get_softc(bus);
	switch (type) {
	case SYS_RES_IRQ:
		return (bus_generic_activate_resource(bus, child, type, rid,
		    r));
	case SYS_RES_MEMORY:
		/* Memory accesses go through a freshly allocated bus tag. */
		tag = sparc64_alloc_bus_tag(r, PCI_MEMORY_BUS_SPACE);
		if (tag == NULL)
			return (ENOMEM);
		rman_set_bustag(r, tag);
		rman_set_bushandle(r, sc->sc_pci_bh[OFW_PCI_CS_MEM32] +
		    rman_get_start(r));
		break;
	case SYS_RES_IOPORT:
		/* I/O ports use the shared tag and the I/O space handle. */
		rman_set_bustag(r, sc->sc_pci_iot);
		rman_set_bushandle(r, sc->sc_pci_bh[OFW_PCI_CS_IO] +
		    rman_get_start(r));
		break;
	}
	return (rman_activate_resource(r));
}
static int
sbus_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *r)
{
	struct sbus_softc *sc;
	struct bus_space_tag *tag;
	int i;

	switch (type) {
	case SYS_RES_IRQ:
		return (bus_generic_activate_resource(bus, child, type, rid,
		    r));
	case SYS_RES_MEMORY:
		sc = device_get_softc(bus);
		/* Find the range whose rman this resource belongs to. */
		for (i = 0; i < sc->sc_nrange; i++) {
			if (rman_is_region_manager(r,
			    &sc->sc_rd[i].rd_rman) != 0) {
				tag = sparc64_alloc_bus_tag(r,
				    SBUS_BUS_SPACE);
				if (tag == NULL)
					return (ENOMEM);
				rman_set_bustag(r, tag);
				rman_set_bushandle(r,
				    sc->sc_rd[i].rd_bushandle +
				    rman_get_start(r));
				return (rman_activate_resource(r));
			}
		}
		/* FALLTHROUGH */
	default:
		return (EINVAL);
	}
}
static int
octopci_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{
	bus_space_handle_t bh;
	int error;

	switch (type) {
	case SYS_RES_IRQ:
		error = bus_generic_activate_resource(bus, child, type, rid,
		    res);
		if (error != 0)
			return (error);
		return (0);
	case SYS_RES_MEMORY:
	case SYS_RES_IOPORT:
		/*
		 * Map the resource's physical address range and record the
		 * resulting handle before activation.
		 */
		error = bus_space_map(rman_get_bustag(res),
		    rman_get_bushandle(res), rman_get_size(res), 0, &bh);
		if (error != 0)
			return (error);
		rman_set_bushandle(res, bh);
		break;
	default:
		return (ENXIO);
	}

	error = rman_activate_resource(res);
	if (error != 0)
		return (error);

	return (0);
}
static int
chipc_activate_resource(device_t dev, device_t child, int type, int rid,
    struct resource *r)
{
	struct chipc_softc *sc;
	struct rman *rm;

	sc = device_get_softc(dev);

	/* Delegate non-locally managed resources to parent */
	rm = chipc_get_rman(sc, type);
	if (rm == NULL || !rman_is_region_manager(r, rm)) {
		return (bus_generic_activate_resource(dev, child, type, rid,
		    r));
	}

	/* Try activating the chipc region-based resource */
	return (chipc_try_activate_resource(sc, child, type, rid, r, true));
}
static int
zbpci_activate_resource(device_t bus, device_t child, int type, int rid,
    struct resource *res)
{
	int error;
	void *vaddr;
	u_long orig_paddr, paddr, psize;

	paddr = rman_get_start(res);
	psize = rman_get_size(res);
	orig_paddr = paddr;

#if _BYTE_ORDER == _BIG_ENDIAN
	/*
	 * The CFE allocates PCI memory resources that map to the
	 * "match byte lanes" address space.  This address space works
	 * best for DMA transfers because it does not do any automatic
	 * byte swaps when data crosses the PCI-CPU interface.
	 *
	 * This also makes it sub-optimal for accesses to PCI device
	 * registers because it exposes the little-endian nature of
	 * the PCI bus to the big-endian CPU.  The Sibyte has another
	 * address window called the "match bit lanes" window which
	 * automatically swaps bytes when data crosses the PCI-CPU
	 * interface.
	 *
	 * We "assume" that any bus_space memory accesses done by the
	 * CPU to a PCI device are register/configuration accesses and
	 * are done through the "match bit lanes" window.  Any DMA
	 * transfers will continue to be through the "match byte lanes"
	 * window because the PCI BAR registers will not be changed.
	 */
	if (type == SYS_RES_MEMORY) {
		if (paddr >= PCI_MATCH_BYTE_LANES_START &&
		    paddr + psize - 1 <= PCI_MATCH_BYTE_LANES_END) {
			paddr |= PCI_MATCH_BIT_LANES_MASK;
			rman_set_start(res, paddr);
			rman_set_end(res, paddr + psize - 1);
		}
	}
#endif

	if (type != SYS_RES_IOPORT) {
		error = bus_generic_activate_resource(bus, child, type, rid,
		    res);
#if _BYTE_ORDER == _BIG_ENDIAN
		if (type == SYS_RES_MEMORY) {
			rman_set_start(res, orig_paddr);
			rman_set_end(res, orig_paddr + psize - 1);
		}
#endif
		return (error);
	}

	/*
	 * Map the I/O space resource through the memory window starting
	 * at PCI_IOSPACE_ADDR.
	 */
	vaddr = pmap_mapdev(paddr + PCI_IOSPACE_ADDR, psize);

	rman_set_virtual(res, vaddr);
	rman_set_bustag(res, mips_bus_space_generic);
	rman_set_bushandle(res, (bus_space_handle_t)vaddr);

	return (rman_activate_resource(res));
}
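All of these methods are reached through the same newbus path: a child driver allocates a resource with RF_ACTIVE set, and the framework calls up into the parent bus's activate_resource implementation. A minimal sketch of that consumer side follows; the driver name foo and its attach routine are hypothetical, while bus_alloc_resource_any(), bus_read_4(), and bus_release_resource() are the standard kernel interfaces.

static int
foo_attach(device_t dev)
{
	struct resource *mem;
	int rid;

	/* Resource ID of the first PCI memory BAR (hypothetical device). */
	rid = PCIR_BAR(0);

	/*
	 * RF_ACTIVE requests immediate activation: the parent bus's
	 * activate_resource method fills in the bus tag and handle so
	 * that bus_read_4() and friends work on the returned resource.
	 */
	mem = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid, RF_ACTIVE);
	if (mem == NULL)
		return (ENXIO);

	/* ... device register accesses via bus_read_4(mem, offset) ... */

	bus_release_resource(dev, SYS_RES_MEMORY, rid, mem);
	return (0);
}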