/*
 * Carve the XLR PCI resource space into three rman arenas: mapped
 * interrupts, the I/O port window and the memory window.  This runs
 * early in boot, so any setup failure is fatal.
 */
static void
xlr_pci_init_resources(void)
{

	irq_rman.rm_start = 0;
	irq_rman.rm_end = 255;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "PCI Mapped Interrupts";
	if (rman_init(&irq_rman) != 0)
		panic("pci_init_resources irq_rman");
	if (rman_manage_region(&irq_rman, 0, 255) != 0)
		panic("pci_init_resources irq_rman");

	port_rman.rm_start = 0;
	port_rman.rm_end = ~0ul;
	port_rman.rm_type = RMAN_ARRAY;
	port_rman.rm_descr = "I/O ports";
	/* The port window occupies 0x10000000-0x1fffffff. */
	if (rman_init(&port_rman) != 0)
		panic("pci_init_resources port_rman");
	if (rman_manage_region(&port_rman, 0x10000000, 0x1fffffff) != 0)
		panic("pci_init_resources port_rman");

	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0ul;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory";
	/* The memory window occupies 0xd0000000-0xdfffffff. */
	if (rman_init(&mem_rman) != 0)
		panic("pci_init_resources mem_rman");
	if (rman_manage_region(&mem_rman, 0xd0000000, 0xdfffffff) != 0)
		panic("pci_init_resources mem_rman");
}
/*
 * Attach the on-board I/O bus: record the bus-space parameters of the
 * UART window and set up the rmans that hand out its memory range and
 * its single IRQ.
 *
 * Fix: rman region ends are inclusive, so the last managed address is
 * oba_addr + oba_size - 1; the original managed one byte too many.
 */
int
obio_attach(device_t dev)
{
	struct obio_softc *sc = device_get_softc(dev);

	sc->oba_st = MIPS_BUS_SPACE_IO;
	sc->oba_addr = MIPS_PHYS_TO_KSEG1(MALTA_UART0ADR);
	/* NOTE(review): window sized by MALTA_PCIMEM3_SIZE — confirm. */
	sc->oba_size = MALTA_PCIMEM3_SIZE;

	sc->oba_rman.rm_type = RMAN_ARRAY;
	sc->oba_rman.rm_descr = "OBIO I/O";
	if (rman_init(&sc->oba_rman) != 0 ||
	    rman_manage_region(&sc->oba_rman, sc->oba_addr,
	    sc->oba_addr + sc->oba_size - 1) != 0)
		panic("obio_attach: failed to set up I/O rman");

	sc->oba_irq_rman.rm_type = RMAN_ARRAY;
	sc->oba_irq_rman.rm_descr = "OBIO IRQ";
	/*
	 * This module is intended for UART purposes only and
	 * its IRQ is 4.
	 */
	if (rman_init(&sc->oba_irq_rman) != 0 ||
	    rman_manage_region(&sc->oba_irq_rman, 4, 4) != 0)
		panic("obio_attach: failed to set up IRQ rman");

	device_add_child(dev, "uart", 0);
	bus_generic_probe(dev);
	bus_generic_attach(dev);
	return (0);
}
/*
 * Set up the CIA PCI resource arenas.
 *
 * Fixes: rman bounds are inclusive, so irq_rman.rm_end is 31 to match
 * the managed region 0-31 (it was 32, one past the region); the memory
 * region ends at 0xffffffff — the original passed (1L << 32), which
 * lies outside rm_end (~0u) and relies on long being 64 bits.
 */
void
pci_init_resources()
{

	irq_rman.rm_start = 0;
	irq_rman.rm_end = 31;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "PCI Interrupt request lines";
	if (rman_init(&irq_rman) ||
	    rman_manage_region(&irq_rman, 0, 31))
		panic("cia_probe irq_rman");

	port_rman.rm_start = 0;
	port_rman.rm_end = 0xffff;
	port_rman.rm_type = RMAN_ARRAY;
	port_rman.rm_descr = "I/O ports";
	if (rman_init(&port_rman) ||
	    rman_manage_region(&port_rman, 0, 0xffff))
		panic("cia_probe port_rman");

	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0u;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory addresses";
	if (rman_init(&mem_rman) ||
	    rman_manage_region(&mem_rman, 0x0, 0xffffffffUL))
		panic("cia_probe mem_rman");
}
void isa_init_intr(void) { static int initted = 0; if (initted) return; initted = 1; isa_irq_rman.rm_start = 0; isa_irq_rman.rm_end = 15; isa_irq_rman.rm_type = RMAN_ARRAY; isa_irq_rman.rm_descr = "ISA Interrupt request lines"; if (rman_init(&isa_irq_rman) || rman_manage_region(&isa_irq_rman, 0, 1) || rman_manage_region(&isa_irq_rman, 3, 15)) panic("isa_probe isa_irq_rman"); isa_drq_rman.rm_start = 0; isa_drq_rman.rm_end = 7; isa_drq_rman.rm_type = RMAN_ARRAY; isa_drq_rman.rm_descr = "ISA DMA request lines"; if (rman_init(&isa_drq_rman) || rman_manage_region(&isa_drq_rman, 0, 7)) panic("isa_probe isa_drq_rman"); /* mask all isa interrupts */ outb(IO_ICU1+1, 0xff); outb(IO_ICU2+1, 0xff); /* make sure chaining irq is enabled */ isa_intr_enable(2); }
/*
 * Attach the root nexus: publish the whole bus address space and all
 * interrupt vectors as allocatable resources, then create the fixed
 * children before running the generic probe/attach pass.
 */
static int
nexus_attach(device_t dev)
{

	mem_rman.rm_start = 0;
	mem_rman.rm_end = BUS_SPACE_MAXADDR;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory addresses";
	if (rman_init(&mem_rman) != 0)
		panic("nexus_attach mem_rman");
	if (rman_manage_region(&mem_rman, 0, BUS_SPACE_MAXADDR) != 0)
		panic("nexus_attach mem_rman");

	irq_rman.rm_start = 0;
	irq_rman.rm_end = ~0;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "Interrupts";
	if (rman_init(&irq_rman) != 0)
		panic("nexus_attach irq_rman");
	if (rman_manage_region(&irq_rman, 0, ~0) != 0)
		panic("nexus_attach irq_rman");

	/* Fixed children: timer, remote console, firmware bus. */
	nexus_add_child(dev, 8, "timer", 0);
	nexus_add_child(dev, 9, "rcons", 0);
	nexus_add_child(dev, 10, "ofwbus", 0);

	bus_generic_probe(dev);
	bus_generic_attach(dev);
	return (0);
}
/*
 * Set up the PCI resource arenas.
 *
 * Fix: rman region ends are inclusive, so the port and memory regions
 * end at 0xffffffff.  The original passed (1L << 32), which exceeds
 * the arenas' rm_end (~0u) and is undefined when long is 32 bits.
 */
void
pci_init_resources(void)
{

	irq_rman.rm_start = 0;
	irq_rman.rm_end = 65536;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "PCI Mapped Interrupts";
	if (rman_init(&irq_rman) ||
	    rman_manage_region(&irq_rman, 0, 65536))
		panic("pci_init_resources irq_rman");

	port_rman.rm_start = 0;
	port_rman.rm_end = ~0u;
	port_rman.rm_type = RMAN_ARRAY;
	port_rman.rm_descr = "I/O ports";
	if (rman_init(&port_rman) ||
	    rman_manage_region(&port_rman, 0x0, 0xffffffffUL))
		panic("pci_init_resources port_rman");

	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0u;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory";
	if (rman_init(&mem_rman) ||
	    rman_manage_region(&mem_rman, 0x0, 0xffffffffUL))
		panic("pci_init_resources mem_rman");
}
/*
 * Attach the virtual nexus: set up interrupt/memory rmans, then scan
 * the machine description (MD) for "virtual-device" nodes and create a
 * child device for each one found.
 */
static int
vnex_attach(device_t dev)
{
	struct vnex_devinfo *vndi;
	struct vnex_softc *sc;
	device_t cdev;
	phandle_t node;
	mde_cookie_t rootnode, *listp = NULL;
	int i, listsz, num_nodes, num_devices;
	md_t *mdp;

	node = ofw_bus_get_node(dev);
	if (node == -1)
		panic("%s: ofw_bus_get_node failed.", __func__);

	sc = device_get_softc(dev);

	/* Interrupt arena covers vectors 0..IV_MAX-1; memory arena is full. */
	sc->sc_intr_rman.rm_type = RMAN_ARRAY;
	sc->sc_intr_rman.rm_descr = "Interrupts";
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "Device Memory";
	if (rman_init(&sc->sc_intr_rman) != 0 ||
	    rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_intr_rman, 0, IV_MAX - 1) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0ULL, ~0ULL) != 0)
		panic("%s: failed to set up rmans.", __func__);

	if ((mdp = md_get()) == NULL)
		return (ENXIO);

	/* Allocate a cookie list large enough for every MD node. */
	num_nodes = md_node_count(mdp);
	listsz = num_nodes * sizeof(mde_cookie_t);
	listp = (mde_cookie_t *)malloc(listsz, M_DEVBUF, M_WAITOK);

	rootnode = md_root_node(mdp);

	/*
	 * scan the machine description for virtual devices
	 */
	num_devices = md_scan_dag(mdp, rootnode,
	    md_find_name(mdp, "virtual-device"),
	    md_find_name(mdp, "fwd"), listp);

	for (i = 0; i < num_devices; i++) {
		/* vnex_setup_dinfo() failing skips the node silently. */
		if ((vndi = vnex_setup_dinfo(dev, listp[i])) == NULL)
			continue;
		cdev = device_add_child(dev, NULL, -1);
		if (cdev == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    vndi->vndi_mbdinfo.mbd_name);
			/* Release the devinfo we just built for this node. */
			vnex_destroy_dinfo(vndi);
			continue;
		}
		device_set_ivars(cdev, vndi);
	}

	bus_generic_attach(dev);

	free(listp, M_DEVBUF);
	return (0);
}
/*
 * Probe the MIPS32 root nexus: publish the hardware IRQ lines and the
 * full address range as allocatable resources.
 */
static int
nexus_probe(device_t dev)
{

	device_set_desc(dev, "MIPS32 root nexus");

	irq_rman.rm_start = 0;
	irq_rman.rm_end = NUM_MIPS_IRQS - 1;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "Hardware IRQs";
	if (rman_init(&irq_rman) != 0)
		panic("%s: irq_rman", __func__);
	if (rman_manage_region(&irq_rman, 0, NUM_MIPS_IRQS - 1) != 0)
		panic("%s: irq_rman", __func__);

	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0ul;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "Memory addresses";
	if (rman_init(&mem_rman) != 0)
		panic("%s: mem_rman", __func__);
	if (rman_manage_region(&mem_rman, 0, ~0) != 0)
		panic("%s: mem_rman", __func__);

	return (0);
}
/*
 * Attach the IXP425 SoC bus: print the feature bits, reset/steer the
 * interrupt controllers, create the bus DMA tag and the IRQ/memory
 * arenas, add the built-in children and map the GPIO and expansion-bus
 * register windows.
 */
static int
ixp425_attach(device_t dev)
{
	struct ixp425_softc *sc;

	device_printf(dev, "%b\n", ixp4xx_read_feature_bits(),
	    EXP_FCTRL_BITS);

	sc = device_get_softc(dev);
	sc->sc_iot = &ixp425_bs_tag;
	/* This softc is a singleton; a second attach is a logic error. */
	KASSERT(ixp425_softc == NULL, ("%s called twice?", __func__));
	ixp425_softc = sc;

	/* Start with every interrupt masked, then program the steering. */
	intr_enabled = 0;
	ixp425_set_intrmask();
	ixp425_set_intrsteer();
	if (cpu_is_ixp43x()) {
		/* The ixp43x variant has a second interrupt bank. */
		intr_enabled2 = 0;
		ixp435_set_intrmask();
		ixp435_set_intrsteer();
	}

	if (bus_dma_tag_create(NULL, 1, 0, BUS_SPACE_MAXADDR_32BIT,
	    BUS_SPACE_MAXADDR, NULL, NULL, 0xffffffff, 0xff, 0xffffffff,
	    0, NULL, NULL, &sc->sc_dmat))
		panic("%s: failed to create dma tag", __func__);

	/* IRQ arena: 64 lines on ixp43x, 32 otherwise (inclusive ends). */
	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "IXP4XX IRQs";
	if (rman_init(&sc->sc_irq_rman) != 0 ||
	    rman_manage_region(&sc->sc_irq_rman, 0,
	    cpu_is_ixp43x() ? 63 : 31) != 0)
		panic("%s: failed to set up IRQ rman", __func__);

	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "IXP4XX Memory";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0, ~0) != 0)
		panic("%s: failed to set up memory rman", __func__);

	BUS_ADD_CHILD(dev, 0, "pcib", 0);
	BUS_ADD_CHILD(dev, 0, "ixpclk", 0);
	BUS_ADD_CHILD(dev, 0, "ixpiic", 0);
	/* XXX move to hints? */
	BUS_ADD_CHILD(dev, 0, "ixpwdog", 0);

	/* attach wired devices via hints */
	bus_enumerate_hinted_children(dev);

	if (bus_space_map(sc->sc_iot, IXP425_GPIO_HWBASE, IXP425_GPIO_SIZE,
	    0, &sc->sc_gpio_ioh))
		panic("%s: unable to map GPIO registers", __func__);
	if (bus_space_map(sc->sc_iot, IXP425_EXP_HWBASE, IXP425_EXP_SIZE,
	    0, &sc->sc_exp_ioh))
		panic("%s: unable to map Expansion Bus registers", __func__);

	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
/*
 * Attach the on-board I/O bus: set up the memory and IRQ arenas, hook
 * the normal and fast interrupt lines, and mask all interrupts at the
 * ICU before probing children.
 *
 * Fixes: rm_descr typo ("memeory"); rman ends are inclusive, so the
 * memory region's last byte is OBIO_MEM_START + OBIO_MEM_SIZE - 1.
 */
static int
obio_attach(device_t dev)
{
	struct obio_softc *sc = device_get_softc(dev);
	int rid;

	sc->oba_mem_rman.rm_type = RMAN_ARRAY;
	sc->oba_mem_rman.rm_descr = "OBIO memory";
	if (rman_init(&sc->oba_mem_rman) != 0 ||
	    rman_manage_region(&sc->oba_mem_rman, OBIO_MEM_START,
	    OBIO_MEM_START + OBIO_MEM_SIZE - 1) != 0)
		panic("obio_attach: failed to set up I/O rman");

	sc->oba_irq_rman.rm_type = RMAN_ARRAY;
	sc->oba_irq_rman.rm_descr = "OBIO IRQ";
	if (rman_init(&sc->oba_irq_rman) != 0 ||
	    rman_manage_region(&sc->oba_irq_rman, 0, NIRQS-1) != 0)
		panic("obio_attach: failed to set up IRQ rman");

	/*
	 * Hook up our interrupt handler.
	 * NOTE(review): the error returns below leak the resources
	 * already allocated — acceptable only if attach failure is fatal.
	 */
	if ((sc->sc_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    ADM5120_INTR, ADM5120_INTR, 1,
	    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "unable to allocate IRQ resource\n");
		return (ENXIO);
	}
	if ((bus_setup_intr(dev, sc->sc_irq, INTR_TYPE_MISC,
	    obio_intr, NULL, sc, &sc->sc_ih))) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		return (ENXIO);
	}

	/* Hook up our FAST interrupt handler. */
	if ((sc->sc_fast_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    ADM5120_FAST_INTR, ADM5120_FAST_INTR, 1,
	    RF_SHAREABLE | RF_ACTIVE)) == NULL) {
		device_printf(dev, "unable to allocate IRQ resource\n");
		return (ENXIO);
	}
	if ((bus_setup_intr(dev, sc->sc_fast_irq, INTR_TYPE_MISC,
	    obio_intr, NULL, sc, &sc->sc_fast_ih))) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		return (ENXIO);
	}

	/* disable all interrupts */
	REG_WRITE(ICU_ENABLE_REG, ICU_INT_MASK);

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	bus_generic_attach(dev);
	return (0);
}
/*
 * Probe the nexus: set up one interrupt rman per CPU (the interrupt
 * ABI layer populates each one), plus global DMA-request, I/O-port and
 * memory arenas.  Note the two-argument rman_init() taking a cpuid
 * (-1 for the global arenas).
 */
static int
nexus_probe(device_t dev)
{
	int cpuid;

	device_quiet(dev);	/* suppress attach message for neatness */

	/* Per-CPU hardware interrupt vector arenas. */
	for (cpuid = 0; cpuid < ncpus; ++cpuid) {
		struct rman *rm = &irq_rman[cpuid];

		rm->rm_start = 0;
		rm->rm_end = IDT_HWI_VECTORS - 1;
		rm->rm_type = RMAN_ARRAY;
		rm->rm_descr = "Interrupt request lines";
		if (rman_init(rm, cpuid))
			panic("nexus_probe rman_init");

		/* The interrupt ABI decides which vectors are usable. */
		MachIntrABI.rman_setup(rm);
	}

	/*
	 * ISA DMA on PCI systems is implemented in the ISA part of each
	 * PCI->ISA bridge and the channels can be duplicated if there are
	 * multiple bridges. (eg: laptops with docking stations)
	 */
	drq_rman.rm_start = 0;
	drq_rman.rm_end = 7;
	drq_rman.rm_type = RMAN_ARRAY;
	drq_rman.rm_descr = "DMA request lines";
	/* XXX drq 0 not available on some machines */
	if (rman_init(&drq_rman, -1) ||
	    rman_manage_region(&drq_rman, drq_rman.rm_start, drq_rman.rm_end))
		panic("nexus_probe drq_rman");

	/*
	 * However, IO ports and Memory truely are global at this level,
	 * as are APIC interrupts (however many IO APICS there turn out
	 * to be on large systems..)
	 */
	port_rman.rm_start = 0;
	port_rman.rm_end = 0xffff;
	port_rman.rm_type = RMAN_ARRAY;
	port_rman.rm_descr = "I/O ports";
	if (rman_init(&port_rman, -1) ||
	    rman_manage_region(&port_rman, 0, 0xffff))
		panic("nexus_probe port_rman");

	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0u;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory addresses";
	if (rman_init(&mem_rman, -1) ||
	    rman_manage_region(&mem_rman, 0, ~0))
		panic("nexus_probe mem_rman");

	return bus_generic_probe(dev);
}
/*
 * Attach the Open Firmware root bus.  The interrupt and memory arenas
 * cover every vector and the whole bus address space; every node found
 * directly under the OFW root gets a devinfo and a child device.
 */
static int
ofwbus_attach(device_t dev)
{
	struct ofwbus_devinfo *ndi;
	struct ofwbus_softc *sc;
	device_t cdev;
	phandle_t node;

	sc = device_get_softc(dev);
	node = OF_peer(0);

	/* Bail early if there is no Open Firmware. */
	if (node == -1)
		return (ENXIO);

	sc->sc_intr_rman.rm_type = RMAN_ARRAY;
	sc->sc_intr_rman.rm_descr = "Interrupts";
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "Device Memory";
	if (rman_init(&sc->sc_intr_rman) != 0 ||
	    rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0, BUS_SPACE_MAXADDR) != 0)
		panic("%s: failed to set up rmans.", __func__);

	/* Allow devices to identify. */
	bus_generic_probe(dev);

	/* Cell counts used to decode children's "reg" properties. */
	sc->acells = 2;
	OF_getencprop(node, "#address-cells", &sc->acells,
	    sizeof(sc->acells));
	sc->scells = 1;
	OF_getencprop(node, "#size-cells", &sc->scells, sizeof(sc->scells));

	/* Walk the OFW tree and attach top-level devices. */
	for (node = OF_child(node); node > 0; node = OF_peer(node)) {
		ndi = ofwbus_setup_dinfo(dev, node);
		if (ndi == NULL)
			continue;
		cdev = device_add_child(dev, NULL, -1);
		if (cdev == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    ndi->ndi_obdinfo.obd_name);
			ofwbus_destroy_dinfo(ndi);
			continue;
		}
		device_set_ivars(cdev, ndi);
	}
	return (bus_generic_attach(dev));
}
/*
 * Probe the OFW nexus: set up interrupt and UPA memory arenas, then
 * walk the children of the firmware root and create a device_t (with
 * devinfo holding the OFW properties) for each non-excluded node.
 */
static int
nexus_probe(device_t dev)
{
	phandle_t root;
	phandle_t child;
	device_t cdev;
	struct nexus_devinfo *dinfo;
	struct nexus_softc *sc;
	char *name, *type;

	if ((root = OF_peer(0)) == -1)
		panic("nexus_probe: OF_peer failed.");

	sc = device_get_softc(dev);
	sc->sc_intr_rman.rm_type = RMAN_ARRAY;
	sc->sc_intr_rman.rm_descr = "Interrupts";
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "UPA Device Memory";
	if (rman_init(&sc->sc_intr_rman) != 0 ||
	    rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_intr_rman, 0, IV_MAX - 1) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, UPA_MEMSTART,
	    UPA_MEMEND) != 0)
		panic("nexus_probe: failed to set up rmans");

	for (child = OF_child(root); child != 0; child = OF_peer(child)) {
		if (child == -1)
			panic("nexus_probe(): OF_child failed.");

		/* Nodes without a "name" property are skipped entirely. */
		if (OF_getprop_alloc(child, "name", 1, (void **)&name) == -1)
			continue;
		OF_getprop_alloc(child, "device_type", 1, (void **)&type);

		/* Excluded nodes: free the property strings and move on. */
		if (NEXUS_EXCLUDED(name, type)) {
			free(name, M_OFWPROP);
			if (type != NULL)
				free(type, M_OFWPROP);
			continue;
		}

		cdev = device_add_child(dev, NULL, -1);
		if (cdev != NULL) {
			/*
			 * The devinfo takes ownership of name/type and of
			 * the model/reg/interrupts property allocations.
			 */
			dinfo = malloc(sizeof(*dinfo), M_NEXUS, M_WAITOK);
			dinfo->ndi_node = child;
			dinfo->ndi_name = name;
			dinfo->ndi_device_type = type;
			OF_getprop_alloc(child, "model", 1,
			    (void **)&dinfo->ndi_model);
			dinfo->ndi_nreg = OF_getprop_alloc(child, "reg",
			    sizeof(*dinfo->ndi_reg),
			    (void **)&dinfo->ndi_reg);
			dinfo->ndi_ninterrupts = OF_getprop_alloc(child,
			    "interrupts", sizeof(*dinfo->ndi_interrupts),
			    (void **)&dinfo->ndi_interrupts);
			dinfo->ndi_bustag = &nexus_bustag;
			dinfo->ndi_dmatag = &nexus_dmatag;
			device_set_ivars(cdev, dinfo);
		} else
			free(name, M_OFWPROP);
	}

	device_set_desc(dev, "OpenFirmware Nexus device");
	return (0);
}
/*
 * Core attach for the generic PCI host bridge: create the parent DMA
 * tag, map the controller registers and initialize the memory and I/O
 * rmans.
 *
 * Fix: the original error paths leaked sc->res (and the first rman)
 * when a later step failed; release them before returning.
 */
int
pci_host_generic_core_attach(device_t dev)
{
	struct generic_pcie_core_softc *sc;
	int error;
	int rid;

	sc = device_get_softc(dev);
	sc->dev = dev;

	/* Create the parent DMA tag to pass down the coherent flag */
	error = bus_dma_tag_create(bus_get_dma_tag(dev), /* parent */
	    1, 0,				/* alignment, bounds */
	    BUS_SPACE_MAXADDR,			/* lowaddr */
	    BUS_SPACE_MAXADDR,			/* highaddr */
	    NULL, NULL,				/* filter, filterarg */
	    BUS_SPACE_MAXSIZE,			/* maxsize */
	    BUS_SPACE_UNRESTRICTED,		/* nsegments */
	    BUS_SPACE_MAXSIZE,			/* maxsegsize */
	    sc->coherent ? BUS_DMA_COHERENT : 0, /* flags */
	    NULL, NULL,				/* lockfunc, lockarg */
	    &sc->dmat);
	if (error != 0)
		return (error);

	rid = 0;
	sc->res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (sc->res == NULL) {
		device_printf(dev, "could not map memory.\n");
		return (ENXIO);
	}

	sc->bst = rman_get_bustag(sc->res);
	sc->bsh = rman_get_bushandle(sc->res);

	sc->mem_rman.rm_type = RMAN_ARRAY;
	sc->mem_rman.rm_descr = "PCIe Memory";
	sc->io_rman.rm_type = RMAN_ARRAY;
	sc->io_rman.rm_descr = "PCIe IO window";

	/* Initialize rman and allocate memory regions */
	error = rman_init(&sc->mem_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n",
		    error);
		goto err_release;
	}

	error = rman_init(&sc->io_rman);
	if (error) {
		device_printf(dev, "rman_init() failed. error = %d\n",
		    error);
		goto err_fini_mem;
	}

	return (0);

err_fini_mem:
	rman_fini(&sc->mem_rman);
err_release:
	bus_release_resource(dev, SYS_RES_MEMORY, rid, sc->res);
	sc->res = NULL;
	return (error);
}
/*
 * Attach the APB bridge: publish its memory window and IRQ range,
 * install the miscellaneous-interrupt filter, attach hinted children
 * and unmask the performance-counter interrupt.
 */
static int
apb_attach(device_t dev)
{
	struct apb_softc *sc = device_get_softc(dev);
	int rid = 0;

	device_set_desc(dev, "APB Bus bridge");

	sc->apb_mem_rman.rm_type = RMAN_ARRAY;
	sc->apb_mem_rman.rm_descr = "APB memory window";
	if (rman_init(&sc->apb_mem_rman) != 0)
		panic("apb_attach: failed to set up memory rman");
	if (rman_manage_region(&sc->apb_mem_rman, AR71XX_APB_BASE,
	    AR71XX_APB_BASE + AR71XX_APB_SIZE - 1) != 0)
		panic("apb_attach: failed to set up memory rman");

	sc->apb_irq_rman.rm_type = RMAN_ARRAY;
	sc->apb_irq_rman.rm_descr = "APB IRQ";
	if (rman_init(&sc->apb_irq_rman) != 0)
		panic("apb_attach: failed to set up IRQ rman");
	if (rman_manage_region(&sc->apb_irq_rman, APB_IRQ_BASE,
	    APB_IRQ_END) != 0)
		panic("apb_attach: failed to set up IRQ rman");

	sc->sc_misc_irq = bus_alloc_resource_any(dev, SYS_RES_IRQ, &rid,
	    RF_SHAREABLE | RF_ACTIVE);
	if (sc->sc_misc_irq == NULL) {
		device_printf(dev, "unable to allocate IRQ resource\n");
		return (ENXIO);
	}

	if ((bus_setup_intr(dev, sc->sc_misc_irq, INTR_TYPE_MISC,
	    apb_filter, NULL, sc, &sc->sc_misc_ih))) {
		device_printf(dev,
		    "WARNING: unable to register interrupt handler\n");
		return (ENXIO);
	}

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	bus_generic_attach(dev);

	/*
	 * Unmask performance counter IRQ
	 */
	apb_unmask_irq((void*)APB_INTR_PMC);
	sc->sc_intr_counter[APB_INTR_PMC] =
	    mips_intrcnt_create("apb irq5: pmc");

	return (0);
}
/*
 * Attach the OFW nexus.  Only the top-level instance (whose parent is
 * "root") sets up the interrupt/memory rmans; nested instances reuse
 * their own OFW node.  All children of the chosen node are enumerated
 * and added as devices.
 */
static int
nexus_attach(device_t dev)
{
	struct nexus_devinfo *ndi;
	struct nexus_softc *sc;
	device_t cdev;
	phandle_t node;

	if (strcmp(device_get_name(device_get_parent(dev)), "root") == 0) {
		/* Top-level nexus: start at the firmware root. */
		node = OF_peer(0);
		if (node == -1)
			panic("%s: OF_peer failed.", __func__);

		sc = device_get_softc(dev);
		sc->sc_intr_rman.rm_type = RMAN_ARRAY;
		sc->sc_intr_rman.rm_descr = "Interrupts";
		sc->sc_mem_rman.rm_type = RMAN_ARRAY;
		sc->sc_mem_rman.rm_descr = "Device Memory";
		if (rman_init(&sc->sc_intr_rman) != 0 ||
		    rman_init(&sc->sc_mem_rman) != 0 ||
		    rman_manage_region(&sc->sc_intr_rman, 0,
		    IV_MAX - 1) != 0 ||
		    rman_manage_region(&sc->sc_mem_rman, 0ULL, ~0ULL) != 0)
			panic("%s: failed to set up rmans.", __func__);
	} else
		node = ofw_bus_get_node(dev);

	/*
	 * Allow devices to identify.
	 */
	bus_generic_probe(dev);

	/*
	 * Now walk the OFW tree and attach top-level devices.
	 */
	for (node = OF_child(node); node > 0; node = OF_peer(node)) {
		if ((ndi = nexus_setup_dinfo(dev, node)) == NULL)
			continue;
		cdev = device_add_child(dev, NULL, -1);
		if (cdev == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    ndi->ndi_obdinfo.obd_name);
			/* Undo the devinfo built for this node. */
			nexus_destroy_dinfo(ndi);
			continue;
		}
		device_set_ivars(cdev, ndi);
	}
	return (bus_generic_attach(dev));
}
/*
 * Attach the PXA2X0 on-board I/O bus: set up the peripheral memory and
 * IRQ arenas (main interrupt lines plus the GPIO interrupt range),
 * then build a resource list and a child device for every entry in the
 * static obio_devices table.
 */
int
pxa_attach(device_t dev)
{
	struct obio_softc *sc;
	struct obio_device *od;
	int i;
	device_t child;

	sc = device_get_softc(dev);
	sc->obio_bst = obio_tag;

	sc->obio_mem.rm_type = RMAN_ARRAY;
	sc->obio_mem.rm_descr = "PXA2X0 OBIO Memory";
	if (rman_init(&sc->obio_mem) != 0)
		panic("pxa_attach: failed to init obio mem rman");
	if (rman_manage_region(&sc->obio_mem, 0, PXA250_PERIPH_END) != 0)
		panic("pxa_attach: failed to set up obio mem rman");

	sc->obio_irq.rm_type = RMAN_ARRAY;
	sc->obio_irq.rm_descr = "PXA2X0 OBIO IRQ";
	if (rman_init(&sc->obio_irq) != 0)
		panic("pxa_attach: failed to init obio irq rman");
	/* Two disjoint IRQ ranges: the main lines and the GPIO lines. */
	if (rman_manage_region(&sc->obio_irq, 0, 31) != 0)
		panic("pxa_attach: failed to set up obio irq rman (main irqs)");
	if (rman_manage_region(&sc->obio_irq, IRQ_GPIO0, IRQ_GPIO_MAX) != 0)
		panic("pxa_attach: failed to set up obio irq rman (gpio irqs)");

	for (od = obio_devices; od->od_name != NULL; od++) {
		resource_list_init(&od->od_resources);

		resource_list_add(&od->od_resources, SYS_RES_MEMORY, 0,
		    od->od_base, od->od_base + od->od_size, od->od_size);

		/* The IRQ list is terminated by a zero entry. */
		for (i = 0; od->od_irqs[i] != 0; i++) {
			resource_list_add(&od->od_resources, SYS_RES_IRQ, i,
			    od->od_irqs[i], od->od_irqs[i], 1);
		}

		child = device_add_child(dev, od->od_name, -1);
		device_set_ivars(child, od);
	}

	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
/*
 * Initialize the gpiobus softc: set up the interrupt arena, query the
 * parent controller for its pin count and allocate the per-pin state
 * array, then create the bus lock.
 */
int
gpiobus_init_softc(device_t dev)
{
	struct gpiobus_softc *sc;

	sc = GPIOBUS_SOFTC(dev);
	sc->sc_busdev = dev;
	sc->sc_dev = device_get_parent(dev);

	sc->sc_intr_rman.rm_type = RMAN_ARRAY;
	sc->sc_intr_rman.rm_descr = "GPIO Interrupts";
	if (rman_init(&sc->sc_intr_rman) != 0 ||
	    rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0)
		panic("%s: failed to set up rman.", __func__);

	if (GPIO_PIN_MAX(sc->sc_dev, &sc->sc_npins) != 0)
		return (ENXIO);

	KASSERT(sc->sc_npins != 0, ("GPIO device with no pins"));

	/* GPIO_PIN_MAX() reports the highest pin number, so pins = max + 1. */
	sc->sc_npins++;

	sc->sc_pins = malloc(sizeof(*sc->sc_pins) * sc->sc_npins, M_DEVBUF,
	    M_NOWAIT | M_ZERO);
	if (sc->sc_pins == NULL)
		return (ENOMEM);

	/* Initialize the bus lock. */
	GPIOBUS_LOCK_INIT(sc);

	return (0);
}
/*
 * Attach the Wii on-board bus: set up the MMIO rman and create one
 * child per fixed on-board device (PIC, framebuffer, EXI, IOS IPC,
 * GPIO), then map the bus control registers.
 *
 * Fix: the rman_init() return value was silently ignored; check it and
 * panic on failure, consistent with the other bus attach routines.
 */
static int
wiibus_attach(device_t self)
{
	struct wiibus_softc *sc;
	struct wiibus_devinfo *dinfo;
	device_t cdev;

	sc = device_get_softc(self);
	sc->sc_rman.rm_type = RMAN_ARRAY;
	sc->sc_rman.rm_descr = "Wii Bus Memory Mapped I/O";
	if (rman_init(&sc->sc_rman) != 0)
		panic("%s: failed to set up rman", __func__);

	/* This softc is a singleton. */
	KASSERT(wiibus_sc == NULL, ("wiibus_sc already initialised"));
	wiibus_sc = sc;

	/* Nintendo PIC */
	dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 0,
	    WIIPIC_REG_ADDR, WIIPIC_REG_LEN, 1);
	cdev = BUS_ADD_CHILD(self, 0, "wiipic", 0);
	device_set_ivars(cdev, dinfo);

	/* Framebuffer */
	dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 0,
	    WIIFB_REG_ADDR, WIIFB_REG_LEN, 8);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 1,
	    WIIFB_FB_ADDR, WIIFB_FB_LEN, 0);
	cdev = BUS_ADD_CHILD(self, 0, "wiifb", 0);
	device_set_ivars(cdev, dinfo);

	/* External Interface Bus */
	dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 0,
	    WIIEXI_REG_ADDR, WIIEXI_REG_LEN, 4);
	cdev = BUS_ADD_CHILD(self, 0, "wiiexi", 0);
	device_set_ivars(cdev, dinfo);

	/* Nintendo IOS IPC */
	dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 0,
	    WIIIPC_REG_ADDR, WIIIPC_REG_LEN, 14);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 1,
	    WIIIPC_IOH_ADDR, WIIIPC_IOH_LEN, 0);
	cdev = BUS_ADD_CHILD(self, 0, "wiiipc", 0);
	device_set_ivars(cdev, dinfo);

	/* GPIO */
	dinfo = malloc(sizeof(*dinfo), M_WIIBUS, M_WAITOK | M_ZERO);
	wiibus_init_device_resources(&sc->sc_rman, dinfo, 0,
	    WIIGPIO_REG_ADDR, WIIGPIO_REG_LEN, 0);
	cdev = BUS_ADD_CHILD(self, 0, "wiigpio", 0);
	device_set_ivars(cdev, dinfo);

	/* The control registers */
	sc->sc_tag = &bs_be_tag;
	sc->sc_handle = (bus_space_handle_t)pmap_mapdev(WIIBUS_CSR_ADDR,
	    WIIBUS_CSR_LEN);

	return (bus_generic_attach(self));
}
/*
 * Attach the FDT/OFW root bus on top of simplebus.  The interrupt and
 * memory arenas cover every vector and the full bus address range;
 * top-level tree nodes are added through simplebus_add_device().
 */
static int
ofwbus_attach(device_t dev)
{
	struct ofwbus_softc *sc;
	phandle_t node;
	struct ofw_bus_devinfo obd;

	sc = device_get_softc(dev);
	node = OF_peer(0);

	/* Bail early if there is no Open Firmware. */
	if (node == -1)
		return (ENXIO);

	/*
	 * ofwbus bus starts on unamed node in FDT, so we cannot make
	 * ofw_bus_devinfo from it. Pass node to simplebus_init directly.
	 */
	simplebus_init(dev, node);

	sc->sc_intr_rman.rm_type = RMAN_ARRAY;
	sc->sc_intr_rman.rm_descr = "Interrupts";
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "Device Memory";
	if (rman_init(&sc->sc_intr_rman) != 0 ||
	    rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_intr_rman, 0, ~0) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0, BUS_SPACE_MAXADDR) != 0)
		panic("%s: failed to set up rmans.", __func__);

	/* Allow devices to identify. */
	bus_generic_probe(dev);

	/* Walk the OFW tree and attach top-level devices. */
	for (node = OF_child(node); node > 0; node = OF_peer(node)) {
		/* Skip nodes we cannot build devinfo for. */
		if (ofw_bus_gen_setup_devinfo(&obd, node) != 0)
			continue;
		simplebus_add_device(dev, node, 0, NULL, -1, NULL);
	}
	return (bus_generic_attach(dev));
}
/*
 * Attach the i80321 PCI bridge: read the bus number from the ATU and
 * set up the I/O, memory and IRQ arenas that child PCI devices will
 * allocate from.
 *
 * Fixes: the panic messages named i80321_pci_probe although this is
 * the attach routine; rman region ends are inclusive, so the I/O and
 * memory windows end at base + size - 1 (the original managed one
 * extra byte in each).
 */
static int
i80321_pci_attach(device_t dev)
{
	uint32_t busno;
	struct i80321_pci_softc *sc = device_get_softc(dev);

	sc->sc_st = i80321_softc->sc_st;
	sc->sc_atu_sh = i80321_softc->sc_atu_sh;

	/* ATU reports the assigned bus number; 0xff means "not set". */
	busno = bus_space_read_4(sc->sc_st, sc->sc_atu_sh, ATU_PCIXSR);
	busno = PCIXSR_BUSNO(busno);
	if (busno == 0xff)
		busno = 0;
	sc->sc_dev = dev;
	sc->sc_busno = busno;
	sc->sc_pciio = &i80321_softc->sc_pci_iot;
	sc->sc_pcimem = &i80321_softc->sc_pci_memt;
	sc->sc_mem = i80321_softc->sc_owin[0].owin_xlate_lo +
	    VERDE_OUT_XLATE_MEM_WIN_SIZE;
	sc->sc_io = i80321_softc->sc_iow_vaddr;

	/* Initialize memory and i/o rmans. */
	sc->sc_io_rman.rm_type = RMAN_ARRAY;
	sc->sc_io_rman.rm_descr = "I80321 PCI I/O Ports";
	if (rman_init(&sc->sc_io_rman) != 0 ||
	    rman_manage_region(&sc->sc_io_rman, sc->sc_io,
	    sc->sc_io + VERDE_OUT_XLATE_IO_WIN_SIZE - 1) != 0) {
		panic("i80321_pci_attach: failed to set up I/O rman");
	}
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "I80321 PCI Memory";
	if (rman_init(&sc->sc_mem_rman) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, 0,
	    VERDE_OUT_XLATE_MEM_WIN_SIZE - 1) != 0) {
		panic("i80321_pci_attach: failed to set up memory rman");
	}
	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "i80321 PCI IRQs";
	/* NOTE(review): 26-32 inclusive spans 7 lines — confirm intended. */
	if (rman_init(&sc->sc_irq_rman) != 0 ||
	    rman_manage_region(&sc->sc_irq_rman, 26, 32) != 0)
		panic("i80321_pci_attach: failed to set up IRQ rman");

	device_add_child(dev, "pci", busno);
	return (bus_generic_attach(dev));
}
/*
 * Attach the AT91 SoC bus: record bus-space handles, publish the IRQ
 * lines and all non-DRAM physical address space as resources, then add
 * the SoC's built-in children and enable interrupts.
 */
static int
at91_attach(device_t dev)
{
	struct at91_softc *sc = device_get_softc(dev);

	arm_post_filter = at91_eoi;

	at91_softc = sc;
	sc->sc_st = &at91_bs_tag;
	sc->sc_sh = AT91_BASE;
	sc->sc_aic_sh = AT91_BASE + AT91_SYS_BASE;
	sc->dev = dev;

	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "AT91 IRQs";
	if (rman_init(&sc->sc_irq_rman) != 0)
		panic("at91_attach: failed to set up IRQ rman");
	if (rman_manage_region(&sc->sc_irq_rman, 1, 31) != 0)
		panic("at91_attach: failed to set up IRQ rman");

	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "AT91 Memory";
	if (rman_init(&sc->sc_mem_rman) != 0)
		panic("at91_attach: failed to set up memory rman");
	/*
	 * Manage the physical space, defined as being everything that isn't
	 * DRAM.
	 */
	if (rman_manage_region(&sc->sc_mem_rman, 0, PHYSADDR - 1) != 0 ||
	    rman_manage_region(&sc->sc_mem_rman, PHYSADDR + (256 << 20),
	    0xfffffffful) != 0)
		panic("at91_attach: failed to set up memory rman");

	/*
	 * Add this device's children...
	 */
	at91_cpu_add_builtin_children(dev, soc_info.soc_data->soc_children);
	soc_info.soc_data->soc_clock_init();

	bus_generic_probe(dev);
	bus_generic_attach(dev);
	enable_interrupts(PSR_I | PSR_F);
	return (0);
}
/*
 * Probe the RTEMS nexus: start the interrupt server, publish the full
 * memory and interrupt-vector ranges, then add one child per entry in
 * the static nexus device table.
 */
static int
nexus_probe(device_t dev)
{
	rtems_status_code status;
	int err;
	size_t i;

	device_set_desc(dev, "RTEMS Nexus device");

	status = rtems_interrupt_server_initialize(
	    BSD_TASK_PRIORITY_INTERRUPT,
	    BSD_MINIMUM_TASK_STACK_SIZE,
	    RTEMS_DEFAULT_MODES,
	    RTEMS_DEFAULT_ATTRIBUTES,
	    NULL
	);
	BSD_ASSERT(status == RTEMS_SUCCESSFUL);

	/* Memory arena: the entire address space. */
	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0UL;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory addresses";
	err = rman_init(&mem_rman) != 0;
	BSD_ASSERT(err == 0);
	err = rman_manage_region(&mem_rman, mem_rman.rm_start,
	    mem_rman.rm_end);
	BSD_ASSERT(err == 0);

	/* Interrupt arena: every vector number. */
	irq_rman.rm_start = 0;
	irq_rman.rm_end = ~0UL;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "Interrupt vectors";
	err = rman_init(&irq_rman) != 0;
	BSD_ASSERT(err == 0);
	err = rman_manage_region(&irq_rman, irq_rman.rm_start,
	    irq_rman.rm_end);
	BSD_ASSERT(err == 0);

	for (i = 0; i < rtems_bsd_nexus_device_count; ++i) {
		const rtems_bsd_device *nd = &rtems_bsd_nexus_devices[i];

		device_add_child(dev, nd->name, nd->unit);
	}

	return (0);
}
/*
 * Attach the CIU interrupt controller: take over the hard interrupt
 * line, publish both CIU enable banks through a single rman, and
 * create named interrupt counters for every line in each bank.
 */
static int
ciu_attach(device_t dev)
{
	char name[MAXCOMLEN + 1];
	struct ciu_softc *sc;
	unsigned line;
	int error;
	int rid;

	sc = device_get_softc(dev);

	rid = 0;
	sc->ciu_irq = bus_alloc_resource(dev, SYS_RES_IRQ, &rid,
	    CIU_IRQ_HARD, CIU_IRQ_HARD, 1, RF_ACTIVE);
	if (sc->ciu_irq == NULL) {
		device_printf(dev, "could not allocate irq%d\n",
		    CIU_IRQ_HARD);
		return (ENXIO);
	}

	error = bus_setup_intr(dev, sc->ciu_irq, INTR_TYPE_MISC, ciu_intr,
	    NULL, sc, NULL);
	if (error != 0) {
		device_printf(dev, "bus_setup_intr failed: %d\n", error);
		return (error);
	}

	sc->irq_rman.rm_type = RMAN_ARRAY;
	sc->irq_rman.rm_descr = "CIU IRQ";
	error = rman_init(&sc->irq_rman);
	if (error != 0)
		return (error);

	/*
	 * We have two contiguous IRQ regions, use a single rman.
	 */
	error = rman_manage_region(&sc->irq_rman, CIU_IRQ_EN0_BEGIN,
	    CIU_IRQ_EN1_END);
	if (error != 0)
		return (error);

	/* One interrupt counter per line in bank 0, then bank 1. */
	for (line = 0; line < CIU_IRQ_EN0_COUNT; line++) {
		snprintf(name, sizeof name, "int%d:",
		    line + CIU_IRQ_EN0_BEGIN);
		ciu_en0_intrcnt[line] = mips_intrcnt_create(name);
	}
	for (line = 0; line < CIU_IRQ_EN1_COUNT; line++) {
		snprintf(name, sizeof name, "int%d:",
		    line + CIU_IRQ_EN1_BEGIN);
		ciu_en1_intrcnt[line] = mips_intrcnt_create(name);
	}

	bus_generic_probe(dev);
	bus_generic_attach(dev);

	return (0);
}
/*
 * Attach the Octeon PCI host bridge and publish its I/O-port and
 * memory windows.
 *
 * Fix: rman region ends are inclusive, so each window's last address
 * is base + size - 1; the original managed one extra byte per window.
 */
static int
octopci_attach(device_t dev)
{
	struct octopci_softc *sc;
	int error;

	/*
	 * XXX
	 * We currently rely on U-Boot to set up the PCI in host state.  We
	 * should properly initialize the PCI bus here.
	 */
	sc = device_get_softc(dev);
	sc->sc_dev = dev;
	sc->sc_domain = 0;
	sc->sc_bus = 0;

	sc->sc_io.rm_type = RMAN_ARRAY;
	sc->sc_io.rm_descr = "Cavium Octeon PCI I/O Ports";
	error = rman_init(&sc->sc_io);
	if (error != 0)
		return (error);
	error = rman_manage_region(&sc->sc_io, CVMX_OCT_PCI_IO_BASE,
	    CVMX_OCT_PCI_IO_BASE + CVMX_OCT_PCI_IO_SIZE - 1);
	if (error != 0)
		return (error);

	sc->sc_mem1.rm_type = RMAN_ARRAY;
	sc->sc_mem1.rm_descr = "Cavium Octeon PCI Memory";
	error = rman_init(&sc->sc_mem1);
	if (error != 0)
		return (error);
	error = rman_manage_region(&sc->sc_mem1, CVMX_OCT_PCI_MEM1_BASE,
	    CVMX_OCT_PCI_MEM1_BASE + CVMX_OCT_PCI_MEM1_SIZE - 1);
	if (error != 0)
		return (error);

	device_add_child(dev, "pci", 0);
	return (bus_generic_attach(dev));
}
/*
 * Attach the on-board I/O bus: set up the memory and IRQ arenas, then
 * install a handler on each of the five hardware interrupt lines.
 *
 * Fixes: rm_descr typo ("memeory"); rman ends are inclusive, so the
 * memory region's last byte is OBIO_MEM_START + OBIO_MEM_SIZE - 1.
 */
static int
obio_attach(device_t dev)
{
	struct obio_softc *sc = device_get_softc(dev);
	int rid, irq;

	sc->oba_mem_rman.rm_type = RMAN_ARRAY;
	sc->oba_mem_rman.rm_descr = "OBIO memory";
	if (rman_init(&sc->oba_mem_rman) != 0 ||
	    rman_manage_region(&sc->oba_mem_rman, OBIO_MEM_START,
	    OBIO_MEM_START + OBIO_MEM_SIZE - 1) != 0)
		panic("obio_attach: failed to set up I/O rman");

	sc->oba_irq_rman.rm_type = RMAN_ARRAY;
	sc->oba_irq_rman.rm_descr = "OBIO IRQ";
	if (rman_init(&sc->oba_irq_rman) != 0 ||
	    rman_manage_region(&sc->oba_irq_rman, IRQ_BASE, IRQ_END) != 0)
		panic("obio_attach: failed to set up IRQ rman");

	/* Hook up our interrupt handlers. We should handle IRQ0..IRQ4*/
	for (irq = 0; irq < 5; irq++) {
		if ((sc->sc_irq[irq] = bus_alloc_resource(dev, SYS_RES_IRQ,
		    &rid, irq, irq, 1, RF_SHAREABLE | RF_ACTIVE)) == NULL) {
			device_printf(dev,
			    "unable to allocate IRQ resource\n");
			return (ENXIO);
		}
		if ((bus_setup_intr(dev, sc->sc_irq[irq], INTR_TYPE_MISC,
		    obio_intr, NULL, sc, &sc->sc_ih[irq]))) {
			device_printf(dev,
			    "WARNING: unable to register interrupt handler\n");
			return (ENXIO);
		}
	}

	bus_generic_probe(dev);
	bus_enumerate_hinted_children(dev);
	bus_generic_attach(dev);
	return (0);
}
/*
 * Probe the RTEMS nexus: optionally start the interrupt server, set up
 * the memory and interrupt-vector arenas, then add one child per entry
 * in the linker-set nexus device table.
 *
 * NOTE(review): this definition appears truncated in this chunk (the
 * SET_FOREACH loop is not followed by a visible return/closing brace);
 * the code is kept byte-identical.
 */
static int
nexus_probe(device_t dev)
{
	static const char name[] = "IRQS";
	rtems_status_code status;
	int err;
	const rtems_bsd_device *nd;

	device_set_desc(dev, "RTEMS Nexus device");

#ifndef DISABLE_INTERRUPT_EXTENSION
	status = rtems_interrupt_server_initialize(
	    rtems_bsd_get_task_priority(name),
	    rtems_bsd_get_task_stack_size(name),
	    RTEMS_DEFAULT_MODES,
	    RTEMS_DEFAULT_ATTRIBUTES,
	    NULL
	);
	BSD_ASSERT(status == RTEMS_SUCCESSFUL);
#endif

	/* Memory arena: the entire address space. */
	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0UL;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory addresses";
	err = rman_init(&mem_rman) != 0;
	BSD_ASSERT(err == 0);
	err = rman_manage_region(&mem_rman, mem_rman.rm_start,
	    mem_rman.rm_end);
	BSD_ASSERT(err == 0);

	/* Interrupt arena: every vector number. */
	irq_rman.rm_start = 0;
	irq_rman.rm_end = ~0UL;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "Interrupt vectors";
	err = rman_init(&irq_rman) != 0;
	BSD_ASSERT(err == 0);
	err = rman_manage_region(&irq_rman, irq_rman.rm_start,
	    irq_rman.rm_end);
	BSD_ASSERT(err == 0);

	/* One child per device registered in the "nexus" linker set. */
	SET_FOREACH(nd, nexus) {
		device_add_child(dev, nd->name, nd->unit);
	}
static void xlp_pcib_init_resources(void) { irq_rman.rm_start = 0; irq_rman.rm_end = 255; irq_rman.rm_type = RMAN_ARRAY; irq_rman.rm_descr = "PCI Mapped Interrupts"; if (rman_init(&irq_rman) || rman_manage_region(&irq_rman, 0, 255)) panic("pci_init_resources irq_rman"); port_rman.rm_start = 0; port_rman.rm_end = ~0ul; port_rman.rm_type = RMAN_ARRAY; port_rman.rm_descr = "I/O ports"; if (rman_init(&port_rman) || rman_manage_region(&port_rman, PCIE_IO_BASE, PCIE_IO_LIMIT)) panic("pci_init_resources port_rman"); mem_rman.rm_start = 0; mem_rman.rm_end = ~0ul; mem_rman.rm_type = RMAN_ARRAY; mem_rman.rm_descr = "I/O memory"; if (rman_init(&mem_rman) || rman_manage_region(&mem_rman, PCIE_MEM_BASE, PCIE_MEM_LIMIT)) panic("pci_init_resources mem_rman"); /* * This includes the GBU (nor flash) memory range and the PCIe * memory area. */ emul_rman.rm_start = 0; emul_rman.rm_end = ~0ul; emul_rman.rm_type = RMAN_ARRAY; emul_rman.rm_descr = "Emulated MEMIO"; if (rman_init(&emul_rman) || rman_manage_region(&emul_rman, EMUL_MEM_START, EMUL_MEM_END)) panic("pci_init_resources emul_rman"); }
/*
 * Attach the root nexus: publish the full address range and every
 * interrupt vector, then run the generic probe/attach pass over any
 * children added elsewhere.
 */
static int
nexus_attach(device_t dev)
{

	mem_rman.rm_start = 0;
	mem_rman.rm_end = ~0ul;
	mem_rman.rm_type = RMAN_ARRAY;
	mem_rman.rm_descr = "I/O memory addresses";
	if (rman_init(&mem_rman) != 0)
		panic("nexus_attach mem_rman");
	if (rman_manage_region(&mem_rman, 0, ~0) != 0)
		panic("nexus_attach mem_rman");

	irq_rman.rm_start = 0;
	irq_rman.rm_end = ~0ul;
	irq_rman.rm_type = RMAN_ARRAY;
	irq_rman.rm_descr = "Interrupts";
	if (rman_init(&irq_rman) != 0)
		panic("nexus_attach irq_rman");
	if (rman_manage_region(&irq_rman, 0, ~0) != 0)
		panic("nexus_attach irq_rman");

	bus_generic_probe(dev);
	bus_generic_attach(dev);
	return (0);
}
/**
 * omap_attach
 *
 * Attach the OMAP SoC bus: publish the interrupt lines and every
 * region of the initial static device map as allocatable resources,
 * then run the generic probe/attach pass and enable interrupts.
 */
static int
omap_attach(device_t dev)
{
	struct omap_softc *sc = device_get_softc(dev);
	const struct pmap_devmap *dm;

	sc->sc_iotag = &omap_bs_tag;
	sc->sc_dev = dev;

	/* Set all interrupts as the resource */
	sc->sc_irq_rman.rm_type = RMAN_ARRAY;
	sc->sc_irq_rman.rm_descr = "OMAP IRQs";
	/*
	 * NOTE(review): rman ends are inclusive, so 0..NIRQ covers
	 * NIRQ + 1 lines — confirm whether NIRQ - 1 was intended.
	 */
	if (rman_init(&sc->sc_irq_rman) != 0)
		panic("%s: failed to set up IRQ rman", __func__);
	if (rman_manage_region(&sc->sc_irq_rman, 0, NIRQ) != 0)
		panic("%s: failed to set up IRQ rman", __func__);

	/* Setup the memory map based on initial device map in *_machdep.c */
	sc->sc_mem_rman.rm_type = RMAN_ARRAY;
	sc->sc_mem_rman.rm_descr = "OMAP Memory";
	if (rman_init(&sc->sc_mem_rman) != 0)
		panic("%s: failed to set up memory rman", __func__);
	for (dm = omap_devmap; dm->pd_va != 0; dm++) {
		if (rman_manage_region(&sc->sc_mem_rman, dm->pd_pa,
		    dm->pd_pa + dm->pd_size - 1) != 0)
			panic("%s: failed to set up memory regions",
			    __func__);
	}

	/* The device list will be created by the 'cpu' device when it is
	 * identified */
	bus_generic_probe(dev);
	bus_generic_attach(dev);

	enable_interrupts(I32_bit | F32_bit);

	return (0);
}