static int
gem_sbus_attach(device_t dev)
{
	struct gem_softc *sc;
	int burst;
	uint32_t val;

	sc = device_get_softc(dev);
	sc->sc_variant = GEM_SUN_GEM;
	sc->sc_dev = dev;
	/* All known SBus models use a SERDES. */
	sc->sc_flags = GEM_SERDES;

	if (bus_alloc_resources(dev, gem_sbus_res_spec, sc->sc_res)) {
		device_printf(dev, "failed to allocate resources\n");
		bus_release_resources(dev, gem_sbus_res_spec, sc->sc_res);
		return (ENXIO);
	}

	GEM_LOCK_INIT(sc, device_get_nameunit(dev));

	OF_getetheraddr(dev, sc->sc_enaddr);

	burst = sbus_get_burstsz(dev);
	val = GEM_SBUS_CFG_PARITY;
	if ((burst & SBUS_BURST64_MASK) != 0) {
		val |= GEM_SBUS_CFG_64BIT;
		burst >>= SBUS_BURST64_SHIFT;
	}
static int
dma_attach(device_t dev)
{
	struct dma_softc *dsc;
	struct lsi64854_softc *lsc;
	struct dma_devinfo *ddi;
	device_t cdev;
	const char *name;
	char *cabletype;
	uint32_t csr;
	phandle_t child, node;
	int error, i;

	dsc = device_get_softc(dev);
	lsc = &dsc->sc_lsi64854;

	name = ofw_bus_get_name(dev);
	node = ofw_bus_get_node(dev);
	dsc->sc_ign = sbus_get_ign(dev);
	dsc->sc_slot = sbus_get_slot(dev);

	i = 0;
	lsc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (lsc->sc_res == NULL) {
		device_printf(dev, "cannot allocate resources\n");
		return (ENXIO);
	}

	if (strcmp(name, "espdma") == 0 || strcmp(name, "dma") == 0)
		lsc->sc_channel = L64854_CHANNEL_SCSI;
	else if (strcmp(name, "ledma") == 0) {
		/*
		 * Check to see which cable type is currently active and
		 * set the appropriate bit in the ledma csr so that it
		 * gets used.  If we didn't netboot, the PROM won't have
		 * the "cable-selection" property; default to TP and then
		 * the user can change it via a "media" option to ifconfig.
		 */
		csr = L64854_GCSR(lsc);
		if ((OF_getprop_alloc(node, "cable-selection", 1,
		    (void **)&cabletype)) == -1) {
			/* assume TP if nothing there */
			csr |= E_TP_AUI;
		} else {
			if (strcmp(cabletype, "aui") == 0)
				csr &= ~E_TP_AUI;
			else
				csr |= E_TP_AUI;
			free(cabletype, M_OFWPROP);
		}
		L64854_SCSR(lsc, csr);
		DELAY(20000);	/* manual says we need a 20ms delay */
		lsc->sc_channel = L64854_CHANNEL_ENET;
	} else {
		device_printf(dev, "unsupported DMA channel\n");
		error = ENXIO;
		goto fail_lres;
	}

	error = bus_dma_tag_create(
	    bus_get_dma_tag(dev),	/* parent */
	    1, 0,			/* alignment, boundary */
	    BUS_SPACE_MAXADDR,		/* lowaddr */
	    BUS_SPACE_MAXADDR,		/* highaddr */
	    NULL, NULL,			/* filter, filterarg */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
	    0,				/* nsegments */
	    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
	    0,				/* flags */
	    NULL, NULL,			/* no locking */
	    &lsc->sc_parent_dmat);
	if (error != 0) {
		device_printf(dev, "cannot allocate parent DMA tag\n");
		goto fail_lres;
	}

	i = sbus_get_burstsz(dev);
	lsc->sc_burst = (i & SBUS_BURST_32) ? 32 :
	    (i & SBUS_BURST_16) ? 16 : 0;
	lsc->sc_dev = dev;

	/* Attach children. */
	i = 0;
	for (child = OF_child(node); child != 0; child = OF_peer(child)) {
		if ((ddi = dma_setup_dinfo(dev, dsc, child)) == NULL)
			continue;
		if (i != 0) {
			device_printf(dev,
			    "<%s>: only one child per DMA channel supported\n",
			    ddi->ddi_obdinfo.obd_name);
			dma_destroy_dinfo(ddi);
			continue;
		}
		if ((cdev = device_add_child(dev, NULL, -1)) == NULL) {
			device_printf(dev, "<%s>: device_add_child failed\n",
			    ddi->ddi_obdinfo.obd_name);
			dma_destroy_dinfo(ddi);
			continue;
		}
		device_set_ivars(cdev, ddi);
		i++;
	}

	return (bus_generic_attach(dev));

 fail_lres:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(lsc->sc_res),
	    lsc->sc_res);

	return (error);
}
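/*
 * The lsi64854_softc embedded in the dma_softc above is shared with the
 * single child that dma_attach() adds.  Below is a minimal sketch (not part
 * of this file) of how a child such as an "esp" instance could pick up the
 * parent's DMA state during its own attach; the helper name is an
 * illustrative assumption, and like the "XXX hackery" cast in
 * esp_sbus_attach() further down it relies on sc_lsi64854 being the first
 * member of struct dma_softc.
 */
static struct lsi64854_softc *
esp_dma_hookup(device_t child, struct ncr53c9x_softc *sc)
{
	struct lsi64854_softc *lsc;

	/* The DMA engine is the immediate parent; reuse its softc. */
	lsc = (struct lsi64854_softc *)
	    device_get_softc(device_get_parent(child));
	lsc->sc_client = sc;	/* let the DMA engine find its client */
	return (lsc);
}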
static int
hme_sbus_attach(device_t dev)
{
	struct hme_sbus_softc *hsc;
	struct hme_softc *sc;
	u_long start, count;
	uint32_t burst;
	int i, error = 0;

	hsc = device_get_softc(dev);
	sc = &hsc->hsc_hme;

	mtx_init(&sc->sc_lock, device_get_nameunit(dev), MTX_NETWORK_LOCK,
	    MTX_DEF);

	/*
	 * Map five register banks:
	 *
	 *	bank 0: HME SEB registers
	 *	bank 1: HME ETX registers
	 *	bank 2: HME ERX registers
	 *	bank 3: HME MAC registers
	 *	bank 4: HME MIF registers
	 */
	i = 0;
	hsc->hsc_seb_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (hsc->hsc_seb_res == NULL) {
		device_printf(dev, "cannot map SEB registers\n");
		error = ENXIO;
		goto fail_mtx_res;
	}
	sc->sc_sebt = rman_get_bustag(hsc->hsc_seb_res);
	sc->sc_sebh = rman_get_bushandle(hsc->hsc_seb_res);

	i = 1;
	hsc->hsc_etx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (hsc->hsc_etx_res == NULL) {
		device_printf(dev, "cannot map ETX registers\n");
		error = ENXIO;
		goto fail_seb_res;
	}
	sc->sc_etxt = rman_get_bustag(hsc->hsc_etx_res);
	sc->sc_etxh = rman_get_bushandle(hsc->hsc_etx_res);

	i = 2;
	hsc->hsc_erx_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (hsc->hsc_erx_res == NULL) {
		device_printf(dev, "cannot map ERX registers\n");
		error = ENXIO;
		goto fail_etx_res;
	}
	sc->sc_erxt = rman_get_bustag(hsc->hsc_erx_res);
	sc->sc_erxh = rman_get_bushandle(hsc->hsc_erx_res);

	i = 3;
	hsc->hsc_mac_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (hsc->hsc_mac_res == NULL) {
		device_printf(dev, "cannot map MAC registers\n");
		error = ENXIO;
		goto fail_erx_res;
	}
	sc->sc_mact = rman_get_bustag(hsc->hsc_mac_res);
	sc->sc_mach = rman_get_bushandle(hsc->hsc_mac_res);

	/*
	 * At least on some HMEs, the MIF registers seem to be inside the MAC
	 * range, so try to kludge around it.
	 */
	i = 4;
	hsc->hsc_mif_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &i,
	    RF_ACTIVE);
	if (hsc->hsc_mif_res == NULL) {
		if (bus_get_resource(dev, SYS_RES_MEMORY, i, &start,
		    &count) != 0) {
			device_printf(dev, "cannot get MIF registers\n");
			error = ENXIO;
			goto fail_mac_res;
		}
		if (start < rman_get_start(hsc->hsc_mac_res) ||
		    start + count - 1 > rman_get_end(hsc->hsc_mac_res)) {
			device_printf(dev, "cannot move MIF registers to MAC "
			    "bank\n");
			error = ENXIO;
			goto fail_mac_res;
		}
		sc->sc_mift = sc->sc_mact;
		bus_space_subregion(sc->sc_mact, sc->sc_mach,
		    start - rman_get_start(hsc->hsc_mac_res), count,
		    &sc->sc_mifh);
	} else {
		sc->sc_mift = rman_get_bustag(hsc->hsc_mif_res);
		sc->sc_mifh = rman_get_bushandle(hsc->hsc_mif_res);
	}

	i = 0;
	hsc->hsc_ires = bus_alloc_resource_any(dev, SYS_RES_IRQ, &i,
	    RF_SHAREABLE | RF_ACTIVE);
	if (hsc->hsc_ires == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		error = ENXIO;
		goto fail_mif_res;
	}

	OF_getetheraddr(dev, sc->sc_enaddr);

	burst = sbus_get_burstsz(dev);
	/* Translate into plain numerical format. */
	if ((burst & SBUS_BURST_64))
		sc->sc_burst = 64;
	else if ((burst & SBUS_BURST_32))
		sc->sc_burst = 32;
	else if ((burst & SBUS_BURST_16))
		sc->sc_burst = 16;
	else
		sc->sc_burst = 0;

	sc->sc_dev = dev;
	sc->sc_flags = 0;

	if ((error = hme_config(sc)) != 0) {
		device_printf(dev, "could not be configured\n");
		goto fail_ires;
	}

	if ((error = bus_setup_intr(dev, hsc->hsc_ires,
	    INTR_TYPE_NET | INTR_MPSAFE, NULL, hme_intr, sc,
	    &hsc->hsc_ih)) != 0) {
		device_printf(dev, "couldn't establish interrupt\n");
		hme_detach(sc);
		goto fail_ires;
	}
	return (0);

 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(hsc->hsc_ires),
	    hsc->hsc_ires);
 fail_mif_res:
	if (hsc->hsc_mif_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(hsc->hsc_mif_res), hsc->hsc_mif_res);
	}
 fail_mac_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_mac_res), hsc->hsc_mac_res);
 fail_erx_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_erx_res), hsc->hsc_erx_res);
 fail_etx_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_etx_res), hsc->hsc_etx_res);
 fail_seb_res:
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_seb_res), hsc->hsc_seb_res);
 fail_mtx_res:
	mtx_destroy(&sc->sc_lock);
	return (error);
}
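/*
 * For symmetry, a sketch of what a matching detach path could look like,
 * unwinding the allocations made above in reverse order.  This is an
 * illustrative assumption, not the driver's actual hme_sbus_detach().
 */
static int
hme_sbus_detach_sketch(device_t dev)
{
	struct hme_sbus_softc *hsc;
	struct hme_softc *sc;

	hsc = device_get_softc(dev);
	sc = &hsc->hsc_hme;
	bus_teardown_intr(dev, hsc->hsc_ires, hsc->hsc_ih);
	hme_detach(sc);
	bus_release_resource(dev, SYS_RES_IRQ, rman_get_rid(hsc->hsc_ires),
	    hsc->hsc_ires);
	if (hsc->hsc_mif_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(hsc->hsc_mif_res), hsc->hsc_mif_res);
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_mac_res), hsc->hsc_mac_res);
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_erx_res), hsc->hsc_erx_res);
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_etx_res), hsc->hsc_etx_res);
	bus_release_resource(dev, SYS_RES_MEMORY,
	    rman_get_rid(hsc->hsc_seb_res), hsc->hsc_seb_res);
	mtx_destroy(&sc->sc_lock);
	return (0);
}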
static int
esp_sbus_attach(device_t dev)
{
	struct esp_softc *esc;
	struct ncr53c9x_softc *sc;
	struct lsi64854_softc *lsc;
	device_t *children;
	int error, i, nchildren;

	esc = device_get_softc(dev);
	sc = &esc->sc_ncr53c9x;

	lsc = NULL;
	esc->sc_dev = dev;
	sc->sc_freq = sbus_get_clockfreq(dev);

	if (strcmp(ofw_bus_get_name(dev), "SUNW,fas") == 0) {
		/*
		 * Allocate space for DMA, in SUNW,fas there are no
		 * separate DMA devices.
		 */
		lsc = malloc(sizeof (struct lsi64854_softc), M_DEVBUF,
		    M_NOWAIT | M_ZERO);
		if (lsc == NULL) {
			device_printf(dev, "out of memory (lsi64854_softc)\n");
			return (ENOMEM);
		}
		esc->sc_dma = lsc;

		/*
		 * SUNW,fas have 2 register spaces: DMA (lsi64854) and
		 * SCSI core (ncr53c9x).
		 */

		/* Allocate DMA registers. */
		i = 0;
		if ((lsc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &i, RF_ACTIVE)) == NULL) {
			device_printf(dev, "cannot allocate DMA registers\n");
			error = ENXIO;
			goto fail_sbus_lsc;
		}

		/* Create a parent DMA tag based on this bus. */
		error = bus_dma_tag_create(
		    bus_get_dma_tag(dev),	/* parent */
		    1, 0,			/* alignment, boundary */
		    BUS_SPACE_MAXADDR,		/* lowaddr */
		    BUS_SPACE_MAXADDR,		/* highaddr */
		    NULL, NULL,			/* filter, filterarg */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsize */
		    0,				/* nsegments */
		    BUS_SPACE_MAXSIZE_32BIT,	/* maxsegsize */
		    0,				/* flags */
		    NULL, NULL,			/* no locking */
		    &lsc->sc_parent_dmat);
		if (error != 0) {
			device_printf(dev, "cannot allocate parent DMA tag\n");
			goto fail_sbus_lres;
		}

		i = sbus_get_burstsz(dev);
#ifdef ESP_SBUS_DEBUG
		printf("%s: burst 0x%x\n", __func__, i);
#endif
		lsc->sc_burst = (i & SBUS_BURST_32) ? 32 :
		    (i & SBUS_BURST_16) ? 16 : 0;

		lsc->sc_channel = L64854_CHANNEL_SCSI;
		lsc->sc_client = sc;
		lsc->sc_dev = dev;

		/* Allocate SCSI core registers. */
		i = 1;
		if ((esc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &i, RF_ACTIVE)) == NULL) {
			device_printf(dev,
			    "cannot allocate SCSI core registers\n");
			error = ENXIO;
			goto fail_sbus_lpdma;
		}
	} else {
		/*
		 * Search accompanying DMA engine.  It should have been
		 * already attached otherwise there isn't much we can do.
		 */
		if (device_get_children(device_get_parent(dev), &children,
		    &nchildren) != 0) {
			device_printf(dev, "cannot determine siblings\n");
			return (ENXIO);
		}
		for (i = 0; i < nchildren; i++) {
			if (device_is_attached(children[i]) &&
			    sbus_get_slot(children[i]) ==
			    sbus_get_slot(dev) &&
			    strcmp(ofw_bus_get_name(children[i]),
			    "dma") == 0) {
				/* XXX hackery */
				esc->sc_dma = (struct lsi64854_softc *)
				    device_get_softc(children[i]);
				break;
			}
		}
		free(children, M_TEMP);
		if (esc->sc_dma == NULL) {
			device_printf(dev, "cannot find DMA engine\n");
			return (ENXIO);
		}
		esc->sc_dma->sc_client = sc;

		/* Allocate SCSI core registers. */
		i = 0;
		if ((esc->sc_res = bus_alloc_resource_any(dev, SYS_RES_MEMORY,
		    &i, RF_ACTIVE)) == NULL) {
			device_printf(dev,
			    "cannot allocate SCSI core registers\n");
			return (ENXIO);
		}
	}

	error = espattach(esc, &esp_sbus_glue);
	if (error != 0) {
		device_printf(dev, "espattach failed\n");
		goto fail_sbus_eres;
	}

	return (0);

 fail_sbus_eres:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(esc->sc_res),
	    esc->sc_res);
	if (strcmp(ofw_bus_get_name(dev), "SUNW,fas") != 0)
		return (error);
 fail_sbus_lpdma:
	bus_dma_tag_destroy(lsc->sc_parent_dmat);
 fail_sbus_lres:
	bus_release_resource(dev, SYS_RES_MEMORY, rman_get_rid(lsc->sc_res),
	    lsc->sc_res);
 fail_sbus_lsc:
	free(lsc, M_DEVBUF);
	return (error);
}
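/*
 * dma_attach(), esp_sbus_attach() and the hme attach routines all repeat
 * the same mapping from the SBus burst-size mask to a numeric burst width.
 * A small helper factoring that pattern out might look like the following;
 * the function name is an illustrative assumption and no such helper exists
 * in these files.
 */
static int
sbus_burst_to_width(int burstsz)
{

	if (burstsz & SBUS_BURST_64)
		return (64);
	if (burstsz & SBUS_BURST_32)
		return (32);
	if (burstsz & SBUS_BURST_16)
		return (16);
	return (0);
}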
static int
hme_sbus_attach(device_t dev)
{
	struct hme_sbus_softc *hsc = device_get_softc(dev);
	struct hme_softc *sc = &hsc->hsc_hme;
	u_int32_t burst;
	u_long start, count;
	int error;

	/*
	 * Map five register banks:
	 *
	 *	bank 0: HME SEB registers
	 *	bank 1: HME ETX registers
	 *	bank 2: HME ERX registers
	 *	bank 3: HME MAC registers
	 *	bank 4: HME MIF registers
	 */
	sc->sc_sebo = sc->sc_etxo = sc->sc_erxo = sc->sc_maco = sc->sc_mifo = 0;
	hsc->hsc_seb_rid = 0;
	hsc->hsc_seb_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &hsc->hsc_seb_rid, 0, ~0, 1, RF_ACTIVE);
	if (hsc->hsc_seb_res == NULL) {
		device_printf(dev, "cannot map SEB registers\n");
		return (ENXIO);
	}
	sc->sc_sebt = rman_get_bustag(hsc->hsc_seb_res);
	sc->sc_sebh = rman_get_bushandle(hsc->hsc_seb_res);

	hsc->hsc_etx_rid = 1;
	hsc->hsc_etx_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &hsc->hsc_etx_rid, 0, ~0, 1, RF_ACTIVE);
	if (hsc->hsc_etx_res == NULL) {
		device_printf(dev, "cannot map ETX registers\n");
		goto fail_seb_res;
	}
	sc->sc_etxt = rman_get_bustag(hsc->hsc_etx_res);
	sc->sc_etxh = rman_get_bushandle(hsc->hsc_etx_res);

	hsc->hsc_erx_rid = 2;
	hsc->hsc_erx_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &hsc->hsc_erx_rid, 0, ~0, 1, RF_ACTIVE);
	if (hsc->hsc_erx_res == NULL) {
		device_printf(dev, "cannot map ERX registers\n");
		goto fail_etx_res;
	}
	sc->sc_erxt = rman_get_bustag(hsc->hsc_erx_res);
	sc->sc_erxh = rman_get_bushandle(hsc->hsc_erx_res);

	hsc->hsc_mac_rid = 3;
	hsc->hsc_mac_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &hsc->hsc_mac_rid, 0, ~0, 1, RF_ACTIVE);
	if (hsc->hsc_mac_res == NULL) {
		device_printf(dev, "cannot map MAC registers\n");
		goto fail_erx_res;
	}
	sc->sc_mact = rman_get_bustag(hsc->hsc_mac_res);
	sc->sc_mach = rman_get_bushandle(hsc->hsc_mac_res);

	/*
	 * At least on some HMEs, the MIF registers seem to be inside the MAC
	 * range, so try to kludge around it.
	 */
	hsc->hsc_mif_rid = 4;
	hsc->hsc_mif_res = bus_alloc_resource(dev, SYS_RES_MEMORY,
	    &hsc->hsc_mif_rid, 0, ~0, 1, RF_ACTIVE);
	if (hsc->hsc_mif_res == NULL) {
		if (bus_get_resource(dev, SYS_RES_MEMORY, hsc->hsc_mif_rid,
		    &start, &count) != 0) {
			device_printf(dev, "cannot get MIF registers\n");
			goto fail_mac_res;
		}
		if (start < rman_get_start(hsc->hsc_mac_res) ||
		    start + count - 1 > rman_get_end(hsc->hsc_mac_res)) {
			device_printf(dev, "cannot move MIF registers to MAC "
			    "bank\n");
			goto fail_mac_res;
		}
		sc->sc_mift = sc->sc_mact;
		sc->sc_mifh = sc->sc_mach;
		sc->sc_mifo = sc->sc_maco + start -
		    rman_get_start(hsc->hsc_mac_res);
	} else {
		sc->sc_mift = rman_get_bustag(hsc->hsc_mif_res);
		sc->sc_mifh = rman_get_bushandle(hsc->hsc_mif_res);
	}

	hsc->hsc_irid = 0;
	hsc->hsc_ires = bus_alloc_resource(dev, SYS_RES_IRQ, &hsc->hsc_irid,
	    0, ~0, 1, RF_SHAREABLE | RF_ACTIVE);
	if (hsc->hsc_ires == NULL) {
		device_printf(dev, "could not allocate interrupt\n");
		error = ENXIO;
		goto fail_mif_res;
	}

	OF_getetheraddr(dev, sc->sc_arpcom.ac_enaddr);

	burst = sbus_get_burstsz(dev);
	/* Translate into plain numerical format. */
	sc->sc_burst = (burst & SBUS_BURST_32) ? 32 :
	    (burst & SBUS_BURST_16) ? 16 : 0;

	sc->sc_pci = 0;		/* XXX: should all be done in bus_dma. */
	sc->sc_dev = dev;

	if ((error = hme_config(sc)) != 0) {
		device_printf(dev, "could not be configured\n");
		goto fail_ires;
	}

	if ((error = bus_setup_intr(dev, hsc->hsc_ires, INTR_TYPE_NET,
	    hme_intr, sc, &hsc->hsc_ih)) != 0) {
		device_printf(dev, "couldn't establish interrupt\n");
		goto fail_ires;
	}
	return (0);

 fail_ires:
	bus_release_resource(dev, SYS_RES_IRQ, hsc->hsc_irid, hsc->hsc_ires);
 fail_mif_res:
	if (hsc->hsc_mif_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_mif_rid,
		    hsc->hsc_mif_res);
	}
 fail_mac_res:
	bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_mac_rid,
	    hsc->hsc_mac_res);
 fail_erx_res:
	bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_erx_rid,
	    hsc->hsc_erx_res);
 fail_etx_res:
	bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_etx_rid,
	    hsc->hsc_etx_res);
 fail_seb_res:
	bus_release_resource(dev, SYS_RES_MEMORY, hsc->hsc_seb_rid,
	    hsc->hsc_seb_res);
	return (ENXIO);
}
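/*
 * The older attach above spells out the full bus_alloc_resource() call with
 * an explicit 0..~0 range, while the newer copy earlier in this section uses
 * the bus_alloc_resource_any() convenience wrapper.  A small sketch of the
 * two equivalent forms for the SEB bank; the function name is an
 * illustrative assumption.
 */
static struct resource *
hme_sbus_map_seb_sketch(device_t dev, int *ridp)
{
	struct resource *res;

	/* Old style: explicit "any address, any size" range. */
	res = bus_alloc_resource(dev, SYS_RES_MEMORY, ridp, 0, ~0, 1,
	    RF_ACTIVE);
	if (res != NULL)
		return (res);
	/* New style: the wrapper fills in the same default range. */
	return (bus_alloc_resource_any(dev, SYS_RES_MEMORY, ridp, RF_ACTIVE));
}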