/*
 * Attach the mainbus.
 */
void
mainbus_attach(device_t parent, device_t self, void *aux)
{
	struct pcibus_attach_args pba;
#if NPCI > 0
	struct genppc_pci_chipset_businfo *pbi;
#endif
#ifdef PCI_NETBSD_CONFIGURE
	struct extent *ioext, *memext;
#endif

	mainbus_found = 1;

	aprint_normal("\n");

	/* attach cpu */
	config_found_ia(self, "mainbus", NULL, mainbus_print);

	/*
	 * XXX Note also that the presence of a PCI bus should
	 * XXX _always_ be checked, and if present the bus should be
	 * XXX 'found'.  However, because of the structure of the code,
	 * XXX that's not currently possible.
	 */
#if NPCI > 0
	genppc_pct = malloc(sizeof(struct genppc_pci_chipset), M_DEVBUF,
	    M_NOWAIT);
	KASSERT(genppc_pct != NULL);
	mvmeppc_pci_get_chipset_tag(genppc_pct);

	pbi = malloc(sizeof(struct genppc_pci_chipset_businfo), M_DEVBUF,
	    M_NOWAIT);
	KASSERT(pbi != NULL);
	pbi->pbi_properties = prop_dictionary_create();
	KASSERT(pbi->pbi_properties != NULL);

	SIMPLEQ_INIT(&genppc_pct->pc_pbi);
	SIMPLEQ_INSERT_TAIL(&genppc_pct->pc_pbi, pbi, next);

#ifdef PCI_NETBSD_CONFIGURE
	ioext = extent_create("pciio", 0x00008000, 0x0000ffff, NULL, 0,
	    EX_NOWAIT);
	memext = extent_create("pcimem", 0x00000000, 0x0fffffff, NULL, 0,
	    EX_NOWAIT);

	pci_configure_bus(genppc_pct, ioext, memext, NULL, 0, CACHELINESIZE);

	extent_destroy(ioext);
	extent_destroy(memext);
#endif

	pba.pba_iot = &prep_io_space_tag;
	pba.pba_memt = &prep_mem_space_tag;
	pba.pba_dmat = &pci_bus_dma_tag;
	pba.pba_dmat64 = NULL;
	pba.pba_pc = genppc_pct;
	pba.pba_bus = 0;
	pba.pba_bridgetag = NULL;
	pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY;
	config_found_ia(self, "pcibus", &pba, pcibusprint);
#endif
}
void recurse_dir(MediaScan *s, const char *path, int recurse_count) {
  char *dir, *p;
#if defined(__unix__) || defined(__unix)
  // char *realdir;
#endif
  char tmp_full_path[MAX_PATH_STR_LEN];
  DIR *dirp;
  struct dirent *dp;
  struct dirq *subdirq;                     // list of subdirs of the current directory
  struct dirq_entry *parent_entry = NULL;   // entry for current dir in s->_dirq
  char redirect_dir[MAX_PATH_STR_LEN];

  if (recurse_count > RECURSE_LIMIT) {
    LOG_ERROR("Hit recurse limit of %d scanning path %s\n", RECURSE_LIMIT, path);
    return;
  }

  if (path[0] != '/') {         // XXX Win32
    // Get full path
    char *buf = (char *)malloc((size_t)MAX_PATH_STR_LEN);
    if (buf == NULL) {
      FATAL("Out of memory for directory scan\n");
      return;
    }

    dir = getcwd(buf, (size_t)MAX_PATH_STR_LEN);
    if (dir == NULL) {
      FATAL("Unable to determine current directory\n");
      free(buf);
      return;
    }
    strcat(dir, "/");
    strcat(dir, path);
  }
  else {
#ifdef USING_TCMALLOC
    // strdup will cause tcmalloc to crash on free
    dir = (char *)malloc((size_t)MAX_PATH_STR_LEN);
    strcpy(dir, path);
#else
    dir = strdup(path);
#endif
  }

  // Strip trailing slash if any
  p = &dir[0];
  while (*p != 0) {
    if (p[1] == 0 && *p == '/')
      *p = 0;
    p++;
  }

  LOG_INFO("Recursed into %s\n", dir);

#if (defined(__APPLE__) && defined(__MACH__))
  if (isAlias(dir)) {
    if (CheckMacAlias(dir, redirect_dir)) {
      LOG_INFO("Resolving Alias %s to %s\n", dir, redirect_dir);
      strcpy(dir, redirect_dir);
    }
    else {
      LOG_ERROR("Failure to follow symlink or alias, skipping directory\n");
      goto out;
    }
  }
#elif (defined(__unix__) || defined(__unix))
  if (isAlias(dir)) {
    FollowLink(dir, redirect_dir);
    LOG_INFO("Resolving symlink %s to %s\n", dir, redirect_dir);
    // realdir = redirect_dir;
  }
  else {
    // realdir = dir;
  }
#endif

  if ((dirp = opendir(dir)) == NULL) {
    LOG_ERROR("Unable to open directory %s: %s\n", dir, strerror(errno));
    goto out;
  }

  subdirq = malloc(sizeof(struct dirq));
  SIMPLEQ_INIT(subdirq);

  while ((dp = readdir(dirp)) != NULL) {
    char *name = dp->d_name;

    // skip all dot files
    if (name[0] != '.') {
      // Check if scan should be aborted
      if (unlikely(s->_want_abort))
        break;

      // Construct full path
      strcpy(tmp_full_path, dir);
      strcat(tmp_full_path, "/");
      strcat(tmp_full_path, name);

      // XXX some platforms may be missing d_type/DT_DIR
#if (defined(__APPLE__) && defined(__MACH__)) || (defined(__unix__) || defined(__unix)) && !defined(__sun__)
      if (dp->d_type == DT_DIR) {
#elif defined(__sun__)
      if (PathIsDirectory(tmp_full_path)) {
#endif
        // Add to the list of subdirectories we need to recurse into
        struct dirq_entry *subdir_entry = malloc(sizeof(struct dirq_entry));

        if (_should_scan_dir(s, tmp_full_path)) {
          subdir_entry->dir = strdup(tmp_full_path);
          SIMPLEQ_INSERT_TAIL(subdirq, subdir_entry, entries);

          LOG_INFO("  subdir: %s\n", tmp_full_path);
        }
        else {
          free(subdir_entry);   // not queued, so release it here
          LOG_INFO("  skipping subdir: %s\n", tmp_full_path);
        }
      }
      else {
        enum media_type type = _should_scan(s, name);

        LOG_INFO("name %s = type %d\n", name, type);

        if (type) {
          struct fileq_entry *entry;

          // Check if this file is a shortcut and if so resolve it
#if (defined(__APPLE__) && defined(__MACH__))
          if (isAlias(name)) {
            char full_name[MAX_PATH_STR_LEN];

            LOG_INFO("Mac Alias detected\n");

            strcpy(full_name, dir);
            strcat(full_name, "/");
            strcat(full_name, name);

            // Resolve the alias the same way as the directory case above
            if (CheckMacAlias(full_name, redirect_dir) &&
                PathIsDirectory(redirect_dir)) {
              struct dirq_entry *subdir_entry = malloc(sizeof(struct dirq_entry));

              subdir_entry->dir = strdup(redirect_dir);
              SIMPLEQ_INSERT_TAIL(subdirq, subdir_entry, entries);

              LOG_INFO("  subdir: %s\n", redirect_dir);
              type = 0;
            }
          }
#elif (defined(__unix__) || defined(__unix))
          if (isAlias(name)) {
            char full_name[MAX_PATH_STR_LEN];

            LOG_INFO("Unix alias detected for %s\n", name);

            strcpy(full_name, dir);
            strcat(full_name, "/");
            strcat(full_name, name);

            FollowLink(full_name, redirect_dir);
            if (PathIsDirectory(redirect_dir)) {
              struct dirq_entry *subdir_entry = malloc(sizeof(struct dirq_entry));

              subdir_entry->dir = strdup(full_name);
              SIMPLEQ_INSERT_TAIL(subdirq, subdir_entry, entries);

              LOG_INFO("  subdir: %s\n", full_name);
              type = 0;
            }
          }
#endif

          // Only add it as a file if the alias didn't turn out to be a directory
          if (type) {
            if (parent_entry == NULL) {
              // Add parent directory to the list of dirs with files
              parent_entry = malloc(sizeof(struct dirq_entry));
              parent_entry->dir = strdup(dir);
              parent_entry->files = malloc(sizeof(struct fileq));
              SIMPLEQ_INIT(parent_entry->files);
              SIMPLEQ_INSERT_TAIL((struct dirq *)s->_dirq, parent_entry, entries);
            }

            // Add scannable file to this directory's list
            entry = malloc(sizeof(struct fileq_entry));
            entry->file = strdup(name);
            entry->type = type;
            SIMPLEQ_INSERT_TAIL(parent_entry->files, entry, entries);

            s->progress->total++;

            LOG_INFO("  [%5d] file: %s\n", s->progress->total, entry->file);
          }
        }
      }
    }
  }

  closedir(dirp);

  // Send progress update
  if (s->on_progress && !s->_want_abort)
    if (progress_update(s->progress, dir))
      send_progress(s);

  // Process subdirs, bumping the recursion depth so RECURSE_LIMIT is enforced
  while (!SIMPLEQ_EMPTY(subdirq)) {
    struct dirq_entry *subdir_entry = SIMPLEQ_FIRST(subdirq);
    SIMPLEQ_REMOVE_HEAD(subdirq, entries);
    if (!s->_want_abort)
      recurse_dir(s, subdir_entry->dir, recurse_count + 1);
    free(subdir_entry->dir);
    free(subdir_entry);
  }
  free(subdirq);

out:
  free(dir);
}
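/*
 * A minimal, self-contained sketch of the queue idiom recurse_dir() uses
 * for subdirq, assuming the BSD <sys/queue.h> SIMPLEQ macros (glibc's
 * sys/queue.h lacks SIMPLEQ, so a bundled copy of the BSD header may be
 * needed).  The demo_* names below are illustrative, not scanner API.
 */
#include <stdio.h>
#include <stdlib.h>
#include <string.h>
#include <sys/queue.h>

struct demo_entry {
  char *dir;
  SIMPLEQ_ENTRY(demo_entry) entries;    // links this entry into a queue
};

SIMPLEQ_HEAD(demo_q, demo_entry);

void demo_simpleq(void) {
  struct demo_q q;
  struct demo_entry *e;

  SIMPLEQ_INIT(&q);                     // queue starts empty

  e = malloc(sizeof(*e));
  e->dir = strdup("/tmp");
  SIMPLEQ_INSERT_TAIL(&q, e, entries);  // FIFO insert, like subdirq

  // Drain and free: the same shape as the "process subdirs" loop above
  while ((e = SIMPLEQ_FIRST(&q)) != NULL) {
    SIMPLEQ_REMOVE_HEAD(&q, entries);
    printf("%s\n", e->dir);
    free(e->dir);
    free(e);
  }
}

int main(void) {
  demo_simpleq();
  return 0;
}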
/*
 * Attach the mainbus.
 */
void
mainbus_attach(device_t parent, device_t self, void *aux)
{
	union mainbus_attach_args mba;
	struct confargs ca;
#if NPCI > 0
	struct genppc_pci_chipset_businfo *pbi;
#ifdef PCI_NETBSD_CONFIGURE
	struct extent *ioext, *memext;
#endif
#endif

	mainbus_found = 1;

	aprint_normal("\n");

#if defined(RESIDUAL_DATA_DUMP)
	print_residual_device_info();
#endif

	/*
	 * Always find the CPUs.
	 */
	ca.ca_name = "cpu";
	ca.ca_node = 0;
	config_found_ia(self, "mainbus", &ca, mainbus_print);
	ca.ca_name = "cpu";
	ca.ca_node = 1;
	config_found_ia(self, "mainbus", &ca, mainbus_print);

	/*
	 * XXX Note also that the presence of a PCI bus should
	 * XXX _always_ be checked, and if present the bus should be
	 * XXX 'found'.  However, because of the structure of the code,
	 * XXX that's not currently possible.
	 */
#if NPCI > 0
	genppc_pct = malloc(sizeof(struct genppc_pci_chipset), M_DEVBUF,
	    M_NOWAIT);
	KASSERT(genppc_pct != NULL);
	bebox_pci_get_chipset_tag(genppc_pct);

	pbi = malloc(sizeof(struct genppc_pci_chipset_businfo), M_DEVBUF,
	    M_NOWAIT);
	KASSERT(pbi != NULL);
	pbi->pbi_properties = prop_dictionary_create();
	KASSERT(pbi->pbi_properties != NULL);

	SIMPLEQ_INIT(&genppc_pct->pc_pbi);
	SIMPLEQ_INSERT_TAIL(&genppc_pct->pc_pbi, pbi, next);

#ifdef PCI_NETBSD_CONFIGURE
	ioext = extent_create("pciio", 0x00008000, 0x0000ffff, NULL, 0,
	    EX_NOWAIT);
	memext = extent_create("pcimem", 0x00000000, 0x0fffffff, NULL, 0,
	    EX_NOWAIT);

	pci_configure_bus(genppc_pct, ioext, memext, NULL, 0, CACHELINESIZE);

	extent_destroy(ioext);
	extent_destroy(memext);
#endif /* PCI_NETBSD_CONFIGURE */

	memset(&mba, 0, sizeof(mba));
	mba.mba_pba.pba_iot = &prep_io_space_tag;
	mba.mba_pba.pba_memt = &prep_mem_space_tag;
	mba.mba_pba.pba_dmat = &pci_bus_dma_tag;
	mba.mba_pba.pba_dmat64 = NULL;
	mba.mba_pba.pba_pc = genppc_pct;
	mba.mba_pba.pba_bus = 0;
	mba.mba_pba.pba_bridgetag = NULL;
	mba.mba_pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY;
	config_found_ia(self, "pcibus", &mba.mba_pba, pcibusprint);

#ifdef RESIDUAL_DATA_DUMP
	/* pbi and genppc_pct exist only when NPCI > 0. */
	SIMPLEQ_FOREACH(pbi, &genppc_pct->pc_pbi, next)
		printf("%s\n",
		    prop_dictionary_externalize(pbi->pbi_properties));
#endif
#endif /* NPCI */
}
/*
 * ae_attach:
 *
 *	Attach an ae interface to the system.
 */
void
ae_attach(device_t parent, device_t self, void *aux)
{
	const uint8_t *enaddr;
	prop_data_t ea;
	struct ae_softc *sc = device_private(self);
	struct arbus_attach_args *aa = aux;
	struct ifnet *ifp = &sc->sc_ethercom.ec_if;
	int i, error;

	sc->sc_dev = self;

	callout_init(&sc->sc_tick_callout, 0);

	printf(": Atheros AR531X 10/100 Ethernet\n");

	/*
	 * Try to get MAC address.
	 */
	ea = prop_dictionary_get(device_properties(sc->sc_dev),
	    "mac-address");
	if (ea == NULL) {
		printf("%s: unable to get mac-addr property\n",
		    device_xname(sc->sc_dev));
		return;
	}
	KASSERT(prop_object_type(ea) == PROP_TYPE_DATA);
	KASSERT(prop_data_size(ea) == ETHER_ADDR_LEN);
	enaddr = prop_data_data_nocopy(ea);

	/* Announce ourselves. */
	printf("%s: Ethernet address %s\n", device_xname(sc->sc_dev),
	    ether_sprintf(enaddr));

	sc->sc_cirq = aa->aa_cirq;
	sc->sc_mirq = aa->aa_mirq;
	sc->sc_st = aa->aa_bst;
	sc->sc_dmat = aa->aa_dmat;

	SIMPLEQ_INIT(&sc->sc_txfreeq);
	SIMPLEQ_INIT(&sc->sc_txdirtyq);

	/*
	 * Map registers.
	 */
	sc->sc_size = aa->aa_size;
	if ((error = bus_space_map(sc->sc_st, aa->aa_addr, sc->sc_size, 0,
	    &sc->sc_sh)) != 0) {
		printf("%s: unable to map registers, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_0;
	}

	/*
	 * Allocate the control data structures, and create and load the
	 * DMA map for it.
	 */
	if ((error = bus_dmamem_alloc(sc->sc_dmat,
	    sizeof(struct ae_control_data), PAGE_SIZE, 0, &sc->sc_cdseg,
	    1, &sc->sc_cdnseg, 0)) != 0) {
		printf("%s: unable to allocate control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_1;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg,
	    sizeof(struct ae_control_data), (void **)&sc->sc_control_data,
	    BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map control data, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_2;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat,
	    sizeof(struct ae_control_data), 1,
	    sizeof(struct ae_control_data), 0, 0, &sc->sc_cddmamap)) != 0) {
		printf("%s: unable to create control data DMA map, "
		    "error = %d\n", device_xname(sc->sc_dev), error);
		goto fail_3;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_cddmamap,
	    sc->sc_control_data, sizeof(struct ae_control_data), NULL,
	    0)) != 0) {
		printf("%s: unable to load control data DMA map, error = %d\n",
		    device_xname(sc->sc_dev), error);
		goto fail_4;
	}

	/*
	 * Create the transmit buffer DMA maps.
	 */
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES,
		    AE_NTXSEGS, MCLBYTES, 0, 0,
		    &sc->sc_txsoft[i].txs_dmamap)) != 0) {
			printf("%s: unable to create tx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			goto fail_5;
		}
	}

	/*
	 * Create the receive buffer DMA maps.
	 */
	for (i = 0; i < AE_NRXDESC; i++) {
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, 0, &sc->sc_rxsoft[i].rxs_dmamap)) != 0) {
			printf("%s: unable to create rx DMA map %d, "
			    "error = %d\n", device_xname(sc->sc_dev), i,
			    error);
			goto fail_6;
		}
		sc->sc_rxsoft[i].rxs_mbuf = NULL;
	}

	/*
	 * Reset the chip to a known state.
	 */
	ae_reset(sc);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this point releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= AE_ATTACHED;

	/*
	 * Initialize our media structures.  This may probe the MII, if
	 * present.
	 */
	sc->sc_mii.mii_ifp = ifp;
	sc->sc_mii.mii_readreg = ae_mii_readreg;
	sc->sc_mii.mii_writereg = ae_mii_writereg;
	sc->sc_mii.mii_statchg = ae_mii_statchg;
	sc->sc_ethercom.ec_mii = &sc->sc_mii;
	ifmedia_init(&sc->sc_mii.mii_media, 0, ether_mediachange,
	    ether_mediastatus);
	mii_attach(sc->sc_dev, &sc->sc_mii, 0xffffffff, MII_PHY_ANY,
	    MII_OFFSET_ANY, 0);
	if (LIST_FIRST(&sc->sc_mii.mii_phys) == NULL) {
		ifmedia_add(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE, 0,
		    NULL);
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_NONE);
	} else
		ifmedia_set(&sc->sc_mii.mii_media, IFM_ETHER|IFM_AUTO);

	sc->sc_tick = ae_mii_tick;

	strcpy(ifp->if_xname, device_xname(sc->sc_dev));
	ifp->if_softc = sc;
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	sc->sc_if_flags = ifp->if_flags;
	ifp->if_ioctl = ae_ioctl;
	ifp->if_start = ae_start;
	ifp->if_watchdog = ae_watchdog;
	ifp->if_init = ae_init;
	ifp->if_stop = ae_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * We can support 802.1Q VLAN-sized frames.
	 */
	sc->sc_ethercom.ec_capabilities |= ETHERCAP_VLAN_MTU;

	/*
	 * Attach the interface.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, enaddr);
	ether_set_ifflags_cb(&sc->sc_ethercom, ae_ifflags_cb);
	rnd_attach_source(&sc->sc_rnd_source, device_xname(sc->sc_dev),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	/*
	 * Make sure the interface is shutdown during reboot.
	 */
	sc->sc_sdhook = shutdownhook_establish(ae_shutdown, sc);
	if (sc->sc_sdhook == NULL)
		printf("%s: WARNING: unable to establish shutdown hook\n",
		    device_xname(sc->sc_dev));

	/*
	 * Add a suspend hook to make sure we come back up after a
	 * resume.
	 */
	sc->sc_powerhook = powerhook_establish(device_xname(sc->sc_dev),
	    ae_power, sc);
	if (sc->sc_powerhook == NULL)
		printf("%s: WARNING: unable to establish power hook\n",
		    device_xname(sc->sc_dev));

	return;

	/*
	 * Free any resources we've allocated during the failed attach
	 * attempt.  Do this in reverse order and fall through.
	 */
 fail_6:
	for (i = 0; i < AE_NRXDESC; i++) {
		if (sc->sc_rxsoft[i].rxs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_rxsoft[i].rxs_dmamap);
	}
 fail_5:
	for (i = 0; i < AE_TXQUEUELEN; i++) {
		if (sc->sc_txsoft[i].txs_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat,
			    sc->sc_txsoft[i].txs_dmamap);
	}
	bus_dmamap_unload(sc->sc_dmat, sc->sc_cddmamap);
 fail_4:
	bus_dmamap_destroy(sc->sc_dmat, sc->sc_cddmamap);
 fail_3:
	bus_dmamem_unmap(sc->sc_dmat, (void *)sc->sc_control_data,
	    sizeof(struct ae_control_data));
 fail_2:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_cdseg, sc->sc_cdnseg);
 fail_1:
	bus_space_unmap(sc->sc_st, sc->sc_sh, sc->sc_size);
 fail_0:
	return;
}
/*
 * Initialise our interface to the controller.
 */
int
cac_init(struct cac_softc *sc, const char *intrstr, int startfw)
{
	struct cac_controller_info cinfo;
	int error, rseg, size, i;
	bus_dma_segment_t seg;
	struct cac_ccb *ccb;
	char firm[8];

	if (intrstr != NULL)
		aprint_normal_dev(sc->sc_dev, "interrupting at %s\n",
		    intrstr);

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_queue);
	mutex_init(&sc->sc_mutex, MUTEX_DEFAULT, IPL_VM);
	cv_init(&sc->sc_ccb_cv, "cacccb");

	size = sizeof(struct cac_ccb) * CAC_MAX_CCBS;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, &seg,
	    1, &rseg, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to allocate CCBs, error = %d\n", error);
		return (-1);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &seg, rseg, size,
	    (void **)&sc->sc_ccbs,
	    BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to map CCBs, error = %d\n", error);
		return (-1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to create CCB DMA map, error = %d\n", error);
		return (-1);
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_ccbs,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(sc->sc_dev,
		    "unable to load CCB DMA map, error = %d\n", error);
		return (-1);
	}

	sc->sc_ccbs_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	memset(sc->sc_ccbs, 0, size);
	ccb = (struct cac_ccb *)sc->sc_ccbs;

	for (i = 0; i < CAC_MAX_CCBS; i++, ccb++) {
		/* Create the DMA map for this CCB's data */
		error = bus_dmamap_create(sc->sc_dmat, CAC_MAX_XFER,
		    CAC_SG_SIZE, CAC_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (error) {
			aprint_error_dev(sc->sc_dev,
			    "can't create ccb dmamap (%d)\n", error);
			break;
		}

		ccb->ccb_flags = 0;
		ccb->ccb_paddr = sc->sc_ccbs_paddr +
		    i * sizeof(struct cac_ccb);
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	/* Start firmware background tasks, if needed. */
	if (startfw) {
		if (cac_cmd(sc, CAC_CMD_START_FIRMWARE, &cinfo, sizeof(cinfo),
		    0, 0, CAC_CCB_DATA_IN, NULL)) {
			aprint_error_dev(sc->sc_dev,
			    "CAC_CMD_START_FIRMWARE failed\n");
			return (-1);
		}
	}

	if (cac_cmd(sc, CAC_CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 0, 0,
	    CAC_CCB_DATA_IN, NULL)) {
		aprint_error_dev(sc->sc_dev,
		    "CAC_CMD_GET_CTRL_INFO failed\n");
		return (-1);
	}

	strlcpy(firm, cinfo.firm_rev, 4 + 1);
	printf("%s: %d channels, firmware <%s>\n", device_xname(sc->sc_dev),
	    cinfo.scsi_chips, firm);

	/* Limit the number of units to the size of our sc_unitmask. */
	sc->sc_nunits = cinfo.num_drvs;
	if (sc->sc_nunits > sizeof(sc->sc_unitmask) * NBBY)
		sc->sc_nunits = sizeof(sc->sc_unitmask) * NBBY;

	/* Attach our units. */
	sc->sc_unitmask = 0;
	cac_rescan(sc->sc_dev, "cac", 0);

	/* Set our `shutdownhook' before we start any device activity. */
	if (cac_sdh == NULL)
		cac_sdh = shutdownhook_establish(cac_shutdown, NULL);

	mutex_enter(&sc->sc_mutex);
	(*sc->sc_cl.cl_intr_enable)(sc, CAC_INTR_ENABLE);
	mutex_exit(&sc->sc_mutex);

#if NBIO > 0
	if (bio_register(sc->sc_dev, cac_ioctl) != 0)
		aprint_error_dev(sc->sc_dev,
		    "controller registration failed\n");
	else
		sc->sc_ioctl = cac_ioctl;
	if (cac_create_sensors(sc) != 0)
		aprint_error_dev(sc->sc_dev, "unable to create sensors\n");
#endif

	return (0);
}
static int
ld_ataraid_start_raid0(struct ld_softc *ld, struct buf *bp)
{
	struct ld_ataraid_softc *sc = (void *)ld;
	struct ataraid_array_info *aai = sc->sc_aai;
	struct ataraid_disk_info *adi;
	SIMPLEQ_HEAD(, cbuf) cbufq;
	struct cbuf *cbp, *other_cbp;
	char *addr;
	daddr_t bn, cbn, tbn, off;
	long bcount, rcount;
	u_int comp;
	const int read = bp->b_flags & B_READ;
	const int mirror = aai->aai_level & AAI_L_RAID1;
	int error;

	/* Allocate component buffers. */
	SIMPLEQ_INIT(&cbufq);
	addr = bp->b_data;
	bn = bp->b_rawblkno;
	bp->b_resid = bp->b_bcount;

	for (bcount = bp->b_bcount; bcount > 0; bcount -= rcount) {
		tbn = bn / aai->aai_interleave;
		off = bn % aai->aai_interleave;

		if (__predict_false(tbn ==
		    aai->aai_capacity / aai->aai_interleave)) {
			/* Last stripe. */
			daddr_t sz = (aai->aai_capacity -
			    (tbn * aai->aai_interleave)) / aai->aai_width;
			comp = off / sz;
			cbn = ((tbn / aai->aai_width) * aai->aai_interleave) +
			    (off % sz);
			rcount = min(bcount, dbtob(sz));
		} else {
			comp = tbn % aai->aai_width;
			cbn = ((tbn / aai->aai_width) * aai->aai_interleave) +
			    off;
			rcount = min(bcount,
			    dbtob(aai->aai_interleave - off));
		}

		/*
		 * See if a component is valid.
		 */
 try_mirror:
		adi = &aai->aai_disks[comp];
		if ((adi->adi_status & ADI_S_ONLINE) == 0) {
			if (mirror && comp < aai->aai_width) {
				comp += aai->aai_width;
				goto try_mirror;
			}

			/*
			 * No component available.
			 */
			error = EIO;
			goto free_and_exit;
		}

		cbp = ld_ataraid_make_cbuf(sc, bp, comp, cbn, addr, rcount);
		if (cbp == NULL) {
 resource_shortage:
			error = EAGAIN;
 free_and_exit:
			/* Free the already allocated component buffers. */
			while ((cbp = SIMPLEQ_FIRST(&cbufq)) != NULL) {
				SIMPLEQ_REMOVE_HEAD(&cbufq, cb_q);
				buf_destroy(&cbp->cb_buf);
				CBUF_PUT(cbp);
			}
			return (error);
		}
		SIMPLEQ_INSERT_TAIL(&cbufq, cbp, cb_q);

		if (mirror && !read && comp < aai->aai_width) {
			comp += aai->aai_width;
			adi = &aai->aai_disks[comp];
			if (adi->adi_status & ADI_S_ONLINE) {
				other_cbp = ld_ataraid_make_cbuf(sc, bp,
				    comp, cbn, addr, rcount);
				if (other_cbp == NULL)
					goto resource_shortage;
				SIMPLEQ_INSERT_TAIL(&cbufq, other_cbp, cb_q);
				other_cbp->cb_other = cbp;
				cbp->cb_other = other_cbp;
			}
		}

		bn += btodb(rcount);
		addr += rcount;
	}

	/* Now fire off the requests. */
	while ((cbp = SIMPLEQ_FIRST(&cbufq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&cbufq, cb_q);
		if ((cbp->cb_buf.b_flags & B_READ) == 0) {
			mutex_enter(&cbp->cb_buf.b_vp->v_interlock);
			cbp->cb_buf.b_vp->v_numoutput++;
			mutex_exit(&cbp->cb_buf.b_vp->v_interlock);
		}
		VOP_STRATEGY(cbp->cb_buf.b_vp, &cbp->cb_buf);
	}

	return (0);
}
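/*
 * Worked example of the striping arithmetic above (values are
 * illustrative, not taken from real array metadata): with
 * aai_interleave = 64 sectors and aai_width = 2 disks, logical
 * block bn = 200 maps to
 *
 *	tbn  = 200 / 64 = 3		(stripe number)
 *	off  = 200 % 64 = 8		(offset within the stripe)
 *	comp = 3 % 2 = 1		(component disk)
 *	cbn  = (3 / 2) * 64 + 8 = 72	(block on that component)
 *
 * and the chunk length is capped at dbtob(64 - 8), so a single
 * component buffer never crosses a stripe boundary.
 */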
void
qlw_scsi_cmd(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct qlw_softc *sc = link->adapter_softc;
	struct qlw_ccb *ccb;
	struct qlw_iocb_req0 *iocb;
	struct qlw_ccb_list list;
	u_int16_t req, rspin;
	int offset, error, done;
	bus_dmamap_t dmap;
	int bus;
	int seg;

	if (xs->cmdlen > sizeof(iocb->cdb)) {
		DPRINTF(QLW_D_IO, "%s: cdb too big (%d)\n", DEVNAME(sc),
		    xs->cmdlen);
		memset(&xs->sense, 0, sizeof(xs->sense));
		xs->sense.error_code = SSD_ERRCODE_VALID | SSD_ERRCODE_CURRENT;
		xs->sense.flags = SKEY_ILLEGAL_REQUEST;
		xs->sense.add_sense_code = 0x20;
		xs->error = XS_SENSE;
		scsi_done(xs);
		return;
	}

	ccb = xs->io;
	dmap = ccb->ccb_dmamap;
	if (xs->datalen > 0) {
		error = bus_dmamap_load(sc->sc_dmat, dmap, xs->data,
		    xs->datalen, NULL, (xs->flags & SCSI_NOSLEEP) ?
		    BUS_DMA_NOWAIT : BUS_DMA_WAITOK);
		if (error) {
			xs->error = XS_DRIVER_STUFFUP;
			scsi_done(xs);
			return;
		}

		bus_dmamap_sync(sc->sc_dmat, dmap, 0, dmap->dm_mapsize,
		    (xs->flags & SCSI_DATA_IN) ? BUS_DMASYNC_PREREAD :
		    BUS_DMASYNC_PREWRITE);
	}

	mtx_enter(&sc->sc_queue_mtx);

	/* put in a sync marker if required */
	bus = qlw_xs_bus(sc, xs);
	if (sc->sc_marker_required[bus]) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		DPRINTF(QLW_D_IO, "%s: writing marker at request %d\n",
		    DEVNAME(sc), req);
		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);
		qlw_put_marker(sc, bus, iocb);
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
		qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);
		sc->sc_marker_required[bus] = 0;
	}

	req = sc->sc_next_req_id++;
	if (sc->sc_next_req_id == sc->sc_maxrequests)
		sc->sc_next_req_id = 0;

	offset = (req * QLW_QUEUE_ENTRY_SIZE);
	iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

	ccb->ccb_xs = xs;

	DPRINTF(QLW_D_IO, "%s: writing cmd at request %d\n", DEVNAME(sc),
	    req);
	qlw_put_cmd(sc, iocb, xs, ccb);
	seg = QLW_IOCB_SEGS_PER_CMD;

	bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests), offset,
	    QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);

	while (seg < ccb->ccb_dmamap->dm_nsegs) {
		req = sc->sc_next_req_id++;
		if (sc->sc_next_req_id == sc->sc_maxrequests)
			sc->sc_next_req_id = 0;

		offset = (req * QLW_QUEUE_ENTRY_SIZE);
		iocb = QLW_DMA_KVA(sc->sc_requests) + offset;
		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_POSTWRITE);

		DPRINTF(QLW_D_IO, "%s: writing cont at request %d\n",
		    DEVNAME(sc), req);
		qlw_put_cont(sc, iocb, xs, ccb, seg);
		seg += QLW_IOCB_SEGS_PER_CONT;

		bus_dmamap_sync(sc->sc_dmat, QLW_DMA_MAP(sc->sc_requests),
		    offset, QLW_QUEUE_ENTRY_SIZE, BUS_DMASYNC_PREWRITE);
	}

	qlw_queue_write(sc, QLW_REQ_IN, sc->sc_next_req_id);

	if (!ISSET(xs->flags, SCSI_POLL)) {
		mtx_leave(&sc->sc_queue_mtx);
		return;
	}

	done = 0;
	SIMPLEQ_INIT(&list);
	do {
		u_int16_t isr, info;

		delay(100);

		if (qlw_read_isr(sc, &isr, &info) == 0)
			continue;

		if (isr != QLW_INT_TYPE_IO) {
			qlw_handle_intr(sc, isr, info);
			continue;
		}

		qlw_clear_isr(sc, isr);

		rspin = qlw_queue_read(sc, QLW_RESP_IN);
		while (rspin != sc->sc_last_resp_id) {
			ccb = qlw_handle_resp(sc, sc->sc_last_resp_id);

			sc->sc_last_resp_id++;
			if (sc->sc_last_resp_id == sc->sc_maxresponses)
				sc->sc_last_resp_id = 0;

			if (ccb != NULL)
				SIMPLEQ_INSERT_TAIL(&list, ccb, ccb_link);
			if (ccb == xs->io)
				done = 1;
		}

		qlw_queue_write(sc, QLW_RESP_OUT, rspin);
	} while (done == 0);

	mtx_leave(&sc->sc_queue_mtx);

	while ((ccb = SIMPLEQ_FIRST(&list)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&list, ccb_link);
		scsi_done(ccb->ccb_xs);
	}
}
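/*
 * Illustrative note on the request ring above (numbers assumed, not
 * taken from hardware documentation): request ids advance modulo
 * sc_maxrequests, so with sc_maxrequests = 256 and sc_next_req_id at
 * 255, a command needing two slots uses ids 255 and 0, and QLW_REQ_IN
 * is then written with 1, the id of the next free slot.  Each slot is
 * synced POSTWRITE before the CPU fills it in and PREWRITE before the
 * queue-in write hands it to the controller.
 */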
static void
ofwpci_attach(device_t parent, device_t self, void *aux)
{
	struct ofwpci_softc *sc = device_private(self);
	pci_chipset_tag_t pc = &sc->sc_pc;
	struct confargs *ca = aux;
	struct pcibus_attach_args pba;
	struct genppc_pci_chipset_businfo *pbi;
	int node = ca->ca_node;
	int i, isprim = 0;
	uint32_t busrange[2];
	char buf[64];
#ifdef PCI_NETBSD_CONFIGURE
	struct extent *ioext, *memext;
#endif

	aprint_normal("\n");

	sc->sc_dev = self;

	/* PCI bus number */
	if (OF_getprop(node, "bus-range", busrange, sizeof(busrange)) != 8)
		return;

	/* bus space map the io ranges */
	sc->sc_iot.pbs_flags = _BUS_SPACE_LITTLE_ENDIAN|_BUS_SPACE_IO_TYPE;
	sc->sc_iot.pbs_base = 0x00000000;
	if (ofwoea_map_space(RANGE_TYPE_PCI, RANGE_IO, node, &sc->sc_iot,
	    "ofwpci io-space") != 0)
		panic("Can't init ofwpci io tag");

	sc->sc_memt.pbs_flags = _BUS_SPACE_LITTLE_ENDIAN|_BUS_SPACE_MEM_TYPE;
	sc->sc_memt.pbs_base = 0x00000000;
	if (ofwoea_map_space(RANGE_TYPE_PCI, RANGE_MEM, node, &sc->sc_memt,
	    "ofwpci mem-space") != 0)
		panic("Can't init ofwpci mem tag");

	aprint_debug("io base=0x%"PRIxPTR" offset=0x%"PRIxPTR
	    " limit=0x%"PRIxPTR"\n", sc->sc_iot.pbs_base,
	    sc->sc_iot.pbs_offset, sc->sc_iot.pbs_limit);

	aprint_debug("mem base=0x%"PRIxPTR" offset=0x%"PRIxPTR
	    " limit=0x%"PRIxPTR"\n", sc->sc_memt.pbs_base,
	    sc->sc_memt.pbs_offset, sc->sc_memt.pbs_limit);

	/* are we the primary pci bus? */
	if (of_find_firstchild_byname(OF_finddevice("/"), "pci") == node) {
		int isa_node;

		isprim++;

		/* yes we are, now do we have an ISA child? */
		isa_node = of_find_firstchild_byname(node, "isa");
		if (isa_node != -1) {
			/* isa == pci */
			genppc_isa_io_space_tag = sc->sc_iot;
			genppc_isa_mem_space_tag = sc->sc_memt;
			map_isa_ioregs();
			init_ofppc_interrupt();
			ofppc_init_comcons(isa_node);
		}
	}

	ofwpci_get_chipset_tag(pc);
	pc->pc_node = node;
	i = OF_package_to_path(node, buf, sizeof(buf) - 5);
	if (i <= 0)
		panic("Can't determine path for pci node %d", node);
	buf[i] = '\0';
	pc->pc_ihandle = OF_open(buf);
	if (pc->pc_ihandle < 0)
		panic("Can't open device %s", buf);

	pc->pc_bus = busrange[0];
	pc->pc_iot = &sc->sc_iot;
	pc->pc_memt = &sc->sc_memt;

	pbi = malloc(sizeof(struct genppc_pci_chipset_businfo), M_DEVBUF,
	    M_NOWAIT);
	KASSERT(pbi != NULL);
	pbi->pbi_properties = prop_dictionary_create();
	KASSERT(pbi->pbi_properties != NULL);

	SIMPLEQ_INIT(&pc->pc_pbi);
	SIMPLEQ_INSERT_TAIL(&pc->pc_pbi, pbi, next);

	genofw_setup_pciintr_map((void *)pc, pbi, pc->pc_node);

#ifdef PCI_NETBSD_CONFIGURE
	ioext = extent_create("pciio",
	    modeldata.pciiodata[device_unit(self)].start,
	    modeldata.pciiodata[device_unit(self)].limit,
	    NULL, 0, EX_NOWAIT);
	memext = extent_create("pcimem", sc->sc_memt.pbs_base,
	    sc->sc_memt.pbs_limit - 1, NULL, 0, EX_NOWAIT);

	if (pci_configure_bus(pc, ioext, memext, NULL, 0, CACHELINESIZE))
		aprint_error("pci_configure_bus() failed\n");

	extent_destroy(ioext);
	extent_destroy(memext);
#endif /* PCI_NETBSD_CONFIGURE */

	memset(&pba, 0, sizeof(pba));
	pba.pba_memt = pc->pc_memt;
	pba.pba_iot = pc->pc_iot;
	pba.pba_dmat = &pci_bus_dma_tag;
	pba.pba_dmat64 = NULL;
	pba.pba_bus = pc->pc_bus;
	pba.pba_bridgetag = NULL;
	pba.pba_pc = pc;
	pba.pba_flags = PCI_FLAGS_IO_OKAY | PCI_FLAGS_MEM_OKAY;
	config_found_ia(self, "pcibus", &pba, pcibusprint);
}
static int
ld_ataraid_start_span(struct ld_softc *ld, struct buf *bp)
{
	struct ld_ataraid_softc *sc = (void *)ld;
	struct ataraid_array_info *aai = sc->sc_aai;
	struct ataraid_disk_info *adi;
	SIMPLEQ_HEAD(, cbuf) cbufq;
	struct cbuf *cbp;
	char *addr;
	daddr_t bn;
	long bcount, rcount;
	u_int comp;

	/* Allocate component buffers. */
	SIMPLEQ_INIT(&cbufq);
	addr = bp->b_data;

	/* Find the first component. */
	comp = 0;
	adi = &aai->aai_disks[comp];
	bn = bp->b_rawblkno;
	while (bn >= adi->adi_compsize) {
		bn -= adi->adi_compsize;
		adi = &aai->aai_disks[++comp];
	}

	bp->b_resid = bp->b_bcount;

	for (bcount = bp->b_bcount; bcount > 0; bcount -= rcount) {
		/*
		 * Clamp this chunk to what remains of the request and
		 * to what remains of the current component.
		 */
		rcount = bcount;
		if ((adi->adi_compsize - bn) < btodb(rcount))
			rcount = dbtob(adi->adi_compsize - bn);

		cbp = ld_ataraid_make_cbuf(sc, bp, comp, bn, addr, rcount);
		if (cbp == NULL) {
			/* Free the already allocated component buffers. */
			while ((cbp = SIMPLEQ_FIRST(&cbufq)) != NULL) {
				SIMPLEQ_REMOVE_HEAD(&cbufq, cb_q);
				buf_destroy(&cbp->cb_buf);
				CBUF_PUT(cbp);
			}
			return (EAGAIN);
		}

		/*
		 * For a span, we always know we advance to the next disk,
		 * and always start at offset 0 on that disk.
		 */
		adi = &aai->aai_disks[++comp];
		bn = 0;

		SIMPLEQ_INSERT_TAIL(&cbufq, cbp, cb_q);
		addr += rcount;
	}

	/* Now fire off the requests. */
	while ((cbp = SIMPLEQ_FIRST(&cbufq)) != NULL) {
		SIMPLEQ_REMOVE_HEAD(&cbufq, cb_q);
		if ((cbp->cb_buf.b_flags & B_READ) == 0) {
			mutex_enter(&cbp->cb_buf.b_vp->v_interlock);
			cbp->cb_buf.b_vp->v_numoutput++;
			mutex_exit(&cbp->cb_buf.b_vp->v_interlock);
		}
		VOP_STRATEGY(cbp->cb_buf.b_vp, &cbp->cb_buf);
	}

	return (0);
}
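/*
 * Worked example of the span mapping above (sizes assumed for
 * illustration): with three components of adi_compsize = 1000 sectors
 * each, a request at b_rawblkno = 1500 skips component 0 entirely
 * (1500 - 1000 = 500) and starts at block 500 of component 1; if the
 * transfer runs past that component's end, the next chunk continues
 * at block 0 of component 2.
 */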
int
ucomopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int unit = UCOMUNIT(dev);
	usbd_status err;
	struct ucom_softc *sc = device_lookup_private(&ucom_cd, unit);
	struct ucom_buffer *ub;
	struct tty *tp;
	int s, i;
	int error;

	if (sc == NULL)
		return (ENXIO);

	if (sc->sc_dying)
		return (EIO);

	if (!device_is_active(sc->sc_dev))
		return (ENXIO);

	tp = sc->sc_tty;

	DPRINTF(("ucomopen: unit=%d, tp=%p\n", unit, tp));

	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	s = spltty();

	/*
	 * Do the following iff this is a first open.
	 */
	while (sc->sc_opening)
		tsleep(&sc->sc_opening, PRIBIO, "ucomop", 0);

	if (sc->sc_dying) {
		splx(s);
		return (EIO);
	}
	sc->sc_opening = 1;

	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		struct termios t;

		tp->t_dev = dev;

		if (sc->sc_methods->ucom_open != NULL) {
			error = sc->sc_methods->ucom_open(sc->sc_parent,
			    sc->sc_portno);
			if (error) {
				ucom_cleanup(sc);
				sc->sc_opening = 0;
				wakeup(&sc->sc_opening);
				splx(s);
				return (error);
			}
		}

		ucom_status_change(sc);

		/* Clear PPS capture state on first open. */
		mutex_spin_enter(&timecounter_lock);
		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
		pps_init(&sc->sc_pps_state);
		mutex_spin_exit(&timecounter_lock);

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		t.c_ispeed = 0;
		t.c_ospeed = TTYDEF_SPEED;
		t.c_cflag = TTYDEF_CFLAG;
		if (ISSET(sc->sc_swflags, TIOCFLAG_CLOCAL))
			SET(t.c_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS))
			SET(t.c_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, TIOCFLAG_MDMBUF))
			SET(t.c_cflag, MDMBUF);

		/* Make sure ucomparam() will do something. */
		tp->t_ospeed = 0;
		(void) ucomparam(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/*
		 * Turn on DTR.  We must always do this, even if carrier is not
		 * present, because otherwise we'd have to use TIOCSDTR
		 * immediately after setting CLOCAL, which applications do not
		 * expect.  We always assert DTR while the device is open
		 * unless explicitly requested to deassert it.  Ditto RTS.
		 */
		ucom_dtr(sc, 1);
		ucom_rts(sc, 1);

		DPRINTF(("ucomopen: open pipes in=%d out=%d\n",
		    sc->sc_bulkin_no, sc->sc_bulkout_no));

		/* Open the bulk pipes */
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkin_no,
		    USBD_EXCLUSIVE_USE, &sc->sc_bulkin_pipe);
		if (err) {
			DPRINTF(("%s: open bulk in error (addr %d), err=%s\n",
			    device_xname(sc->sc_dev), sc->sc_bulkin_no,
			    usbd_errstr(err)));
			error = EIO;
			goto fail_0;
		}
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkout_no,
		    USBD_EXCLUSIVE_USE, &sc->sc_bulkout_pipe);
		if (err) {
			DPRINTF(("%s: open bulk out error (addr %d), err=%s\n",
			    device_xname(sc->sc_dev), sc->sc_bulkout_no,
			    usbd_errstr(err)));
			error = EIO;
			goto fail_1;
		}

		sc->sc_rx_unblock = 0;
		sc->sc_rx_stopped = 0;
		sc->sc_tx_stopped = 0;

		memset(sc->sc_ibuff, 0, sizeof(sc->sc_ibuff));
		memset(sc->sc_obuff, 0, sizeof(sc->sc_obuff));

		SIMPLEQ_INIT(&sc->sc_ibuff_empty);
		SIMPLEQ_INIT(&sc->sc_ibuff_full);
		SIMPLEQ_INIT(&sc->sc_obuff_free);
		SIMPLEQ_INIT(&sc->sc_obuff_full);

		/* Allocate input buffers */
		for (ub = &sc->sc_ibuff[0]; ub != &sc->sc_ibuff[UCOM_IN_BUFFS];
		    ub++) {
			ub->ub_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (ub->ub_xfer == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			ub->ub_data = usbd_alloc_buffer(ub->ub_xfer,
			    sc->sc_ibufsizepad);
			if (ub->ub_data == NULL) {
				error = ENOMEM;
				goto fail_2;
			}

			if (ucomsubmitread(sc, ub) != USBD_NORMAL_COMPLETION) {
				error = EIO;
				goto fail_2;
			}
		}

		for (ub = &sc->sc_obuff[0];
		    ub != &sc->sc_obuff[UCOM_OUT_BUFFS]; ub++) {
			ub->ub_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (ub->ub_xfer == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			ub->ub_data = usbd_alloc_buffer(ub->ub_xfer,
			    sc->sc_obufsize);
			if (ub->ub_data == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			SIMPLEQ_INSERT_TAIL(&sc->sc_obuff_free, ub, ub_link);
		}
	}
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);

	error = ttyopen(tp, UCOMDIALOUT(dev), ISSET(flag, O_NONBLOCK));
	if (error)
		goto bad;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return (0);

fail_2:
	usbd_abort_pipe(sc->sc_bulkin_pipe);
	for (i = 0; i < UCOM_IN_BUFFS; i++) {
		if (sc->sc_ibuff[i].ub_xfer != NULL) {
			usbd_free_xfer(sc->sc_ibuff[i].ub_xfer);
			sc->sc_ibuff[i].ub_xfer = NULL;
			sc->sc_ibuff[i].ub_data = NULL;
		}
	}
	usbd_abort_pipe(sc->sc_bulkout_pipe);
	for (i = 0; i < UCOM_OUT_BUFFS; i++) {
		if (sc->sc_obuff[i].ub_xfer != NULL) {
			usbd_free_xfer(sc->sc_obuff[i].ub_xfer);
			sc->sc_obuff[i].ub_xfer = NULL;
			sc->sc_obuff[i].ub_data = NULL;
		}
	}

	usbd_close_pipe(sc->sc_bulkout_pipe);
	sc->sc_bulkout_pipe = NULL;
fail_1:
	usbd_close_pipe(sc->sc_bulkin_pipe);
	sc->sc_bulkin_pipe = NULL;
fail_0:
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);
	return (error);

bad:
	s = spltty();
	CLR(tp->t_state, TS_BUSY);
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it
		 * opened.  Clean up the state as appropriate.
		 */
		ucom_cleanup(sc);
	}
	splx(s);

	return (error);
}
static void
ld_ataraid_attach(device_t parent, device_t self, void *aux)
{
	struct ld_ataraid_softc *sc = device_private(self);
	struct ld_softc *ld = &sc->sc_ld;
	struct ataraid_array_info *aai = aux;
	struct ataraid_disk_info *adi = NULL;
	const char *level;
	struct vnode *vp;
	char unklev[32];
	u_int i;

	ld->sc_dv = self;

	sc->sc_cbufpool = pool_cache_init(sizeof(struct cbuf), 0, 0, 0,
	    "ldcbuf", NULL, IPL_BIO, cbufpool_ctor, cbufpool_dtor, sc);
	sc->sc_sih_cookie = softint_establish(SOFTINT_BIO,
	    ld_ataraid_start_vstrategy, sc);

	sc->sc_aai = aai;	/* this data persists */

	ld->sc_maxxfer = MAXPHYS * aai->aai_width;	/* XXX */
	ld->sc_secperunit = aai->aai_capacity;
	ld->sc_secsize = 512;				/* XXX */
	ld->sc_maxqueuecnt = 128;			/* XXX */
	ld->sc_dump = ld_ataraid_dump;

	switch (aai->aai_level) {
	case AAI_L_SPAN:
		level = "SPAN";
		ld->sc_start = ld_ataraid_start_span;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	case AAI_L_RAID0:
		level = "RAID-0";
		ld->sc_start = ld_ataraid_start_raid0;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	case AAI_L_RAID1:
		level = "RAID-1";
		ld->sc_start = ld_ataraid_start_raid0;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	case AAI_L_RAID0 | AAI_L_RAID1:
		level = "RAID-10";
		ld->sc_start = ld_ataraid_start_raid0;
		sc->sc_iodone = ld_ataraid_iodone_raid0;
		break;

	default:
		snprintf(unklev, sizeof(unklev), "<unknown level 0x%x>",
		    aai->aai_level);
		level = unklev;
	}

	aprint_naive(": ATA %s array\n", level);
	aprint_normal(": %s ATA %s array\n",
	    ata_raid_type_name(aai->aai_type), level);

	if (ld->sc_start == NULL) {
		aprint_error_dev(ld->sc_dv, "unsupported array type\n");
		return;
	}

	/*
	 * We get a geometry from the device; use it.
	 */
	ld->sc_nheads = aai->aai_heads;
	ld->sc_nsectors = aai->aai_sectors;
	ld->sc_ncylinders = aai->aai_cylinders;

	/*
	 * Configure all the component disks.
	 */
	for (i = 0; i < aai->aai_ndisks; i++) {
		adi = &aai->aai_disks[i];
		vp = ata_raid_disk_vnode_find(adi);
		if (vp == NULL) {
			/*
			 * XXX This is bogus.  We should just mark the
			 * XXX component as FAILED, and write-back new
			 * XXX config blocks.
			 */
			break;
		}
		sc->sc_vnodes[i] = vp;
	}
	if (i == aai->aai_ndisks) {
		ld->sc_flags = LDF_ENABLED;
		goto finish;
	}

	for (i = 0; i < aai->aai_ndisks; i++) {
		vp = sc->sc_vnodes[i];
		sc->sc_vnodes[i] = NULL;
		if (vp != NULL)
			(void) vn_close(vp, FREAD|FWRITE, NOCRED);
	}

 finish:
#if NBIO > 0
	if (bio_register(self, ld_ataraid_bioctl) != 0)
		panic("%s: bioctl registration failed\n",
		    device_xname(ld->sc_dv));
#endif
	SIMPLEQ_INIT(&sc->sc_cbufq);
	ldattach(ld);
}
void
ubsec_attach(struct device *parent, struct device *self, void *aux)
{
	struct ubsec_softc *sc = (struct ubsec_softc *)self;
	struct pci_attach_args *pa = aux;
	pci_chipset_tag_t pc = pa->pa_pc;
	pci_intr_handle_t ih;
	pcireg_t memtype;
	const char *intrstr = NULL;
	struct ubsec_dma *dmap;
	bus_size_t iosize;
	u_int32_t i;
	int algs[CRYPTO_ALGORITHM_MAX + 1];

	SIMPLEQ_INIT(&sc->sc_queue);
	SIMPLEQ_INIT(&sc->sc_qchip);
	SIMPLEQ_INIT(&sc->sc_queue2);
	SIMPLEQ_INIT(&sc->sc_qchip2);
	SIMPLEQ_INIT(&sc->sc_queue4);
	SIMPLEQ_INIT(&sc->sc_qchip4);

	sc->sc_statmask = BS_STAT_MCR1_DONE | BS_STAT_DMAERR;
	sc->sc_maxaggr = UBS_MIN_AGGR;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BLUESTEEL &&
	    PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BLUESTEEL_5601)
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5802 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5805))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5820 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5822))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;

	if ((PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5821) ||
	    (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_SUN &&
	     (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_SCA1K ||
	      PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_SUN_5821))) {
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY;
	}

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5823 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5825))
		sc->sc_flags |= UBS_FLAGS_KEY | UBS_FLAGS_RNG |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_HWNORM | UBS_FLAGS_BIGKEY |
		    UBS_FLAGS_AES;

	if (PCI_VENDOR(pa->pa_id) == PCI_VENDOR_BROADCOM &&
	    (PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5860 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5861 ||
	     PCI_PRODUCT(pa->pa_id) == PCI_PRODUCT_BROADCOM_5862)) {
		sc->sc_maxaggr = UBS_MAX_AGGR;
		sc->sc_statmask |= BS_STAT_MCR1_ALLEMPTY |
		    BS_STAT_MCR2_ALLEMPTY | BS_STAT_MCR3_ALLEMPTY |
		    BS_STAT_MCR4_ALLEMPTY;
		sc->sc_flags |= UBS_FLAGS_MULTIMCR | UBS_FLAGS_HWNORM |
		    UBS_FLAGS_LONGCTX | UBS_FLAGS_AES |
		    UBS_FLAGS_KEY | UBS_FLAGS_BIGKEY;
#if 0
		/* The RNG is not yet supported */
		sc->sc_flags |= UBS_FLAGS_RNG | UBS_FLAGS_RNG4;
#endif
	}

	memtype = pci_mapreg_type(pa->pa_pc, pa->pa_tag, BS_BAR);
	if (pci_mapreg_map(pa, BS_BAR, memtype, 0,
	    &sc->sc_st, &sc->sc_sh, NULL, &iosize, 0)) {
		printf(": can't find mem space\n");
		return;
	}

	sc->sc_dmat = pa->pa_dmat;

	if (pci_intr_map(pa, &ih)) {
		printf(": couldn't map interrupt\n");
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}
	intrstr = pci_intr_string(pc, ih);
	sc->sc_ih = pci_intr_establish(pc, ih, IPL_NET, ubsec_intr, sc,
	    self->dv_xname);
	if (sc->sc_ih == NULL) {
		printf(": couldn't establish interrupt");
		if (intrstr != NULL)
			printf(" at %s", intrstr);
		printf("\n");
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}

	sc->sc_cid = crypto_get_driverid(0);
	if (sc->sc_cid < 0) {
		pci_intr_disestablish(pc, sc->sc_ih);
		bus_space_unmap(sc->sc_st, sc->sc_sh, iosize);
		return;
	}

	SIMPLEQ_INIT(&sc->sc_freequeue);
	dmap = sc->sc_dmaa;
	for (i = 0; i < UBS_MAX_NQUEUE; i++, dmap++) {
		struct ubsec_q *q;

		q = (struct ubsec_q *)malloc(sizeof(struct ubsec_q),
		    M_DEVBUF, M_NOWAIT);
		if (q == NULL) {
			printf(": can't allocate queue buffers\n");
			break;
		}

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_dmachunk),
		    &dmap->d_alloc, 0)) {
			printf(": can't allocate dma buffers\n");
			free(q, M_DEVBUF, 0);
			break;
		}
		dmap->d_dma = (struct ubsec_dmachunk *)dmap->d_alloc.dma_vaddr;

		q->q_dma = dmap;
		sc->sc_queuea[i] = q;

		SIMPLEQ_INSERT_TAIL(&sc->sc_freequeue, q, q_next);
	}

	bzero(algs, sizeof(algs));
	algs[CRYPTO_3DES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_MD5_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	algs[CRYPTO_SHA1_HMAC] = CRYPTO_ALG_FLAG_SUPPORTED;
	if (sc->sc_flags & UBS_FLAGS_AES)
		algs[CRYPTO_AES_CBC] = CRYPTO_ALG_FLAG_SUPPORTED;
	crypto_register(sc->sc_cid, algs, ubsec_newsession,
	    ubsec_freesession, ubsec_process);

	/*
	 * Reset Broadcom chip
	 */
	ubsec_reset_board(sc);

	/*
	 * Init Broadcom specific PCI settings
	 */
	ubsec_init_pciregs(pa);

	/*
	 * Init Broadcom chip
	 */
	ubsec_init_board(sc);

	printf(": 3DES MD5 SHA1");
	if (sc->sc_flags & UBS_FLAGS_AES)
		printf(" AES");

#ifndef UBSEC_NO_RNG
	if (sc->sc_flags & UBS_FLAGS_RNG) {
		if (sc->sc_flags & UBS_FLAGS_RNG4)
			sc->sc_statmask |= BS_STAT_MCR4_DONE;
		else
			sc->sc_statmask |= BS_STAT_MCR2_DONE;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_mcr),
		    &sc->sc_rng.rng_q.q_mcr, 0))
			goto skip_rng;

		if (ubsec_dma_malloc(sc, sizeof(struct ubsec_ctx_rngbypass),
		    &sc->sc_rng.rng_q.q_ctx, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		if (ubsec_dma_malloc(sc, sizeof(u_int32_t) *
		    UBSEC_RNG_BUFSIZ, &sc->sc_rng.rng_buf, 0)) {
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_ctx);
			ubsec_dma_free(sc, &sc->sc_rng.rng_q.q_mcr);
			goto skip_rng;
		}

		timeout_set(&sc->sc_rngto, ubsec_rng, sc);
		if (hz >= 100)
			sc->sc_rnghz = hz / 100;
		else
			sc->sc_rnghz = 1;
		timeout_add(&sc->sc_rngto, sc->sc_rnghz);
		printf(" RNG");
skip_rng:
		;
	}
#endif /* UBSEC_NO_RNG */

	if (sc->sc_flags & UBS_FLAGS_KEY)
		sc->sc_statmask |= BS_STAT_MCR2_DONE;

	printf(", %s\n", intrstr);
}
/*
 * Initialise our interface to the controller.
 */
int
cac_init(struct cac_softc *sc, int startfw)
{
	struct scsibus_attach_args saa;
	struct cac_controller_info cinfo;
	int error, rseg, size, i;
	bus_dma_segment_t seg[1];
	struct cac_ccb *ccb;

	SIMPLEQ_INIT(&sc->sc_ccb_free);
	SIMPLEQ_INIT(&sc->sc_ccb_queue);

	size = sizeof(struct cac_ccb) * CAC_MAX_CCBS;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, size, PAGE_SIZE, 0, seg,
	    1, &rseg, BUS_DMA_NOWAIT | BUS_DMA_ZERO)) != 0) {
		printf("%s: unable to allocate CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return (-1);
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, seg, rseg, size,
	    &sc->sc_ccbs, BUS_DMA_NOWAIT | BUS_DMA_COHERENT)) != 0) {
		printf("%s: unable to map CCBs, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return (-1);
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, size, 1, size, 0,
	    BUS_DMA_NOWAIT, &sc->sc_dmamap)) != 0) {
		printf("%s: unable to create CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return (-1);
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap, sc->sc_ccbs,
	    size, NULL, BUS_DMA_NOWAIT)) != 0) {
		printf("%s: unable to load CCB DMA map, error = %d\n",
		    sc->sc_dv.dv_xname, error);
		return (-1);
	}

	sc->sc_ccbs_paddr = sc->sc_dmamap->dm_segs[0].ds_addr;
	ccb = (struct cac_ccb *)sc->sc_ccbs;

	for (i = 0; i < CAC_MAX_CCBS; i++, ccb++) {
		/* Create the DMA map for this CCB's data */
		error = bus_dmamap_create(sc->sc_dmat, CAC_MAX_XFER,
		    CAC_SG_SIZE, CAC_MAX_XFER, 0,
		    BUS_DMA_NOWAIT | BUS_DMA_ALLOCNOW,
		    &ccb->ccb_dmamap_xfer);
		if (error) {
			printf("%s: can't create ccb dmamap (%d)\n",
			    sc->sc_dv.dv_xname, error);
			break;
		}

		ccb->ccb_paddr = sc->sc_ccbs_paddr +
		    i * sizeof(struct cac_ccb);
		SIMPLEQ_INSERT_TAIL(&sc->sc_ccb_free, ccb, ccb_chain);
	}

	/* Start firmware background tasks, if needed. */
	if (startfw) {
		if (cac_cmd(sc, CAC_CMD_START_FIRMWARE, &cinfo, sizeof(cinfo),
		    0, 0, CAC_CCB_DATA_IN, NULL)) {
			printf("%s: CAC_CMD_START_FIRMWARE failed\n",
			    sc->sc_dv.dv_xname);
			return (-1);
		}
	}

	if (cac_cmd(sc, CAC_CMD_GET_CTRL_INFO, &cinfo, sizeof(cinfo), 0, 0,
	    CAC_CCB_DATA_IN, NULL)) {
		printf("%s: CAC_CMD_GET_CTRL_INFO failed\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	if (!cinfo.num_drvs) {
		printf("%s: no volumes defined\n", sc->sc_dv.dv_xname);
		return (-1);
	}

	sc->sc_nunits = cinfo.num_drvs;
	sc->sc_dinfos = malloc(cinfo.num_drvs *
	    sizeof(struct cac_drive_info), M_DEVBUF, M_NOWAIT | M_ZERO);
	if (sc->sc_dinfos == NULL) {
		printf("%s: cannot allocate memory for drive_info\n",
		    sc->sc_dv.dv_xname);
		return (-1);
	}

	sc->sc_link.adapter_softc = sc;
	sc->sc_link.adapter = &cac_switch;
	sc->sc_link.adapter_target = cinfo.num_drvs;
	sc->sc_link.adapter_buswidth = cinfo.num_drvs;
	sc->sc_link.openings = CAC_MAX_CCBS / sc->sc_nunits;
	if (sc->sc_link.openings < 4)
		sc->sc_link.openings = 4;

	bzero(&saa, sizeof(saa));
	saa.saa_sc_link = &sc->sc_link;

	config_found(&sc->sc_dv, &saa, scsiprint);

	/* Set our `shutdownhook' before we start any device activity. */
	if (cac_sdh == NULL)
		cac_sdh = shutdownhook_establish(cac_shutdown, NULL);

	(*sc->sc_cl->cl_intr_enable)(sc, 1);

#if NBIO > 0
	if (bio_register(&sc->sc_dv, cac_ioctl) != 0)
		printf("%s: controller registration failed\n",
		    sc->sc_dv.dv_xname);
	else
		sc->sc_ioctl = cac_ioctl;

#ifndef SMALL_KERNEL
	if (cac_create_sensors(sc) != 0)
		printf("%s: unable to create sensors\n",
		    sc->sc_dv.dv_xname);
#endif
#endif

	return (0);
}
STATIC void
cardslotattach(device_t parent, device_t self, void *aux)
{
	struct cardslot_softc *sc = device_private(self);
	struct cardslot_attach_args *caa = aux;
	struct cbslot_attach_args *cba = caa->caa_cb_attach;
	struct pcmciabus_attach_args *pa = caa->caa_16_attach;
	struct cardbus_softc *csc = NULL;
	struct pcmcia_softc *psc = NULL;

	sc->sc_dev = self;

	sc->sc_cb_softc = NULL;
	sc->sc_16_softc = NULL;
	SIMPLEQ_INIT(&sc->sc_events);
	sc->sc_th_enable = 0;

	aprint_naive("\n");
	aprint_normal("\n");

	DPRINTF(("%s attaching CardBus bus...\n", device_xname(self)));
	if (cba != NULL) {
		csc = device_private(config_found_ia(self, "cbbus", cba,
		    cardslot_cb_print));
		if (csc) {
			/* cardbus found */
			DPRINTF(("%s: found cardbus on %s\n", __func__,
			    device_xname(self)));
			sc->sc_cb_softc = csc;
		}
	}

	if (pa != NULL) {
		sc->sc_16_softc = config_found_sm_loc(self, "pcmciabus",
		    NULL, pa, cardslot_16_print, cardslot_16_submatch);
		if (sc->sc_16_softc) {
			/* pcmcia 16-bit bus found */
			DPRINTF(("%s: found 16-bit pcmcia bus\n", __func__));
			psc = device_private(sc->sc_16_softc);
		}
	}

	if (csc != NULL || psc != NULL) {
		config_pending_incr(self);
		if (kthread_create(PRI_NONE, 0, NULL, cardslot_event_thread,
		    sc, &sc->sc_event_thread, "%s", device_xname(self))) {
			aprint_error_dev(sc->sc_dev,
			    "unable to create thread\n");
			panic("cardslotattach");
		}
		sc->sc_th_enable = 1;
	}

	if (csc && (csc->sc_cf->cardbus_ctrl)(csc->sc_cc, CARDBUS_CD)) {
		DPRINTF(("%s: CardBus card found\n", __func__));
		/* attach deferred */
		cardslot_event_throw(sc, CARDSLOT_EVENT_INSERTION_CB);
	}

	if (psc && (psc->pct->card_detect)(psc->pch)) {
		DPRINTF(("%s: 16-bit card found\n", __func__));
		/* attach deferred */
		cardslot_event_throw(sc, CARDSLOT_EVENT_INSERTION_16);
	}

	if (!pmf_device_register(self, NULL, NULL))
		aprint_error_dev(self, "couldn't establish power handler\n");
}
void
at91spi_attach_common(device_t parent, device_t self, void *aux,
    at91spi_machdep_tag_t md)
{
	struct at91spi_softc *sc = device_private(self);
	struct at91bus_attach_args *sa = aux;
	struct spibus_attach_args sba;
	bus_dma_segment_t segs;
	int rsegs, err;

	aprint_normal(": AT91 SPI Controller\n");

	sc->sc_dev = self;
	sc->sc_iot = sa->sa_iot;
	sc->sc_pid = sa->sa_pid;
	sc->sc_dmat = sa->sa_dmat;
	sc->sc_md = md;

	if (bus_space_map(sa->sa_iot, sa->sa_addr, sa->sa_size, 0,
	    &sc->sc_ioh))
		panic("%s: Cannot map registers", device_xname(self));

	/* we want to use dma, so allocate dma memory: */
	err = bus_dmamem_alloc(sc->sc_dmat, PAGE_SIZE, 0, PAGE_SIZE,
	    &segs, 1, &rsegs, BUS_DMA_WAITOK);
	if (err == 0) {
		err = bus_dmamem_map(sc->sc_dmat, &segs, 1, PAGE_SIZE,
		    &sc->sc_dmapage, BUS_DMA_WAITOK);
	}
	if (err == 0) {
		err = bus_dmamap_create(sc->sc_dmat, PAGE_SIZE, 1,
		    PAGE_SIZE, 0, BUS_DMA_WAITOK, &sc->sc_dmamap);
	}
	if (err == 0) {
		err = bus_dmamap_load(sc->sc_dmat, sc->sc_dmamap,
		    sc->sc_dmapage, PAGE_SIZE, NULL, BUS_DMA_WAITOK);
	}
	if (err != 0)
		panic("%s: Cannot get DMA memory", device_xname(sc->sc_dev));
	sc->sc_dmaaddr = sc->sc_dmamap->dm_segs[0].ds_addr;

	/*
	 * Initialize SPI controller
	 */
	sc->sc_spi.sct_cookie = sc;
	sc->sc_spi.sct_configure = at91spi_configure;
	sc->sc_spi.sct_transfer = at91spi_transfer;
	/* sc->sc_spi.sct_nslaves must have been initialized by machdep code */
	if (!sc->sc_spi.sct_nslaves)
		aprint_error("%s: no slaves!\n", device_xname(sc->sc_dev));
	sba.sba_controller = &sc->sc_spi;

	/* initialize the queue */
	SIMPLEQ_INIT(&sc->sc_q);

	/* reset the SPI */
	at91_peripheral_clock(sc->sc_pid, 1);
	PUTREG(sc, SPI_CR, SPI_CR_SWRST);
	delay(100);

	/* be paranoid and make sure the PDC is dead */
	PUTREG(sc, SPI_PDC_BASE + PDC_PTCR, PDC_PTCR_TXTDIS | PDC_PTCR_RXTDIS);
	PUTREG(sc, SPI_PDC_BASE + PDC_RNCR, 0);
	PUTREG(sc, SPI_PDC_BASE + PDC_RCR, 0);
	PUTREG(sc, SPI_PDC_BASE + PDC_TNCR, 0);
	PUTREG(sc, SPI_PDC_BASE + PDC_TCR, 0);

	/* configure SPI: */
	PUTREG(sc, SPI_IDR, -1);
	PUTREG(sc, SPI_CSR(0), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_CSR(1), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_CSR(2), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_CSR(3), SPI_CSR_SCBR | SPI_CSR_BITS_8);
	PUTREG(sc, SPI_MR, SPI_MR_MODFDIS /* <- machdep? */ | SPI_MR_MSTR);

	/* enable device interrupts */
	sc->sc_ih = at91_intr_establish(sc->sc_pid, IPL_BIO, INTR_HIGH_LEVEL,
	    at91spi_intr, sc);

	/* enable SPI */
	PUTREG(sc, SPI_CR, SPI_CR_SPIEN);
	if (GETREG(sc, SPI_SR) & SPI_SR_RDRF)
		(void)GETREG(sc, SPI_RDR);

	PUTREG(sc, SPI_PDC_BASE + PDC_PTCR, PDC_PTCR_TXTEN | PDC_PTCR_RXTEN);

	/* attach slave devices */
	(void) config_found_ia(sc->sc_dev, "spibus", &sba, spibus_print);
}
/*
 * Attach the interface.  Allocate softc structures, do ifmedia
 * setup and ethernet/BPF attach.
 */
void
rtk_attach(struct rtk_softc *sc)
{
	device_t self = sc->sc_dev;
	struct ifnet *ifp;
	struct rtk_tx_desc *txd;
	uint16_t val;
	uint8_t eaddr[ETHER_ADDR_LEN];
	int error;
	int i, addr_len;

	callout_init(&sc->rtk_tick_ch, 0);

	/*
	 * Check EEPROM type 9346 or 9356.
	 */
	if (rtk_read_eeprom(sc, RTK_EE_ID, RTK_EEADDR_LEN1) == 0x8129)
		addr_len = RTK_EEADDR_LEN1;
	else
		addr_len = RTK_EEADDR_LEN0;

	/*
	 * Get station address.
	 */
	val = rtk_read_eeprom(sc, RTK_EE_EADDR0, addr_len);
	eaddr[0] = val & 0xff;
	eaddr[1] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR1, addr_len);
	eaddr[2] = val & 0xff;
	eaddr[3] = val >> 8;
	val = rtk_read_eeprom(sc, RTK_EE_EADDR2, addr_len);
	eaddr[4] = val & 0xff;
	eaddr[5] = val >> 8;

	if ((error = bus_dmamem_alloc(sc->sc_dmat, RTK_RXBUFLEN + 16,
	    PAGE_SIZE, 0, &sc->sc_dmaseg, 1, &sc->sc_dmanseg,
	    BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't allocate recv buffer, error = %d\n", error);
		goto fail_0;
	}

	if ((error = bus_dmamem_map(sc->sc_dmat, &sc->sc_dmaseg,
	    sc->sc_dmanseg, RTK_RXBUFLEN + 16, (void **)&sc->rtk_rx_buf,
	    BUS_DMA_NOWAIT|BUS_DMA_COHERENT)) != 0) {
		aprint_error_dev(self,
		    "can't map recv buffer, error = %d\n", error);
		goto fail_1;
	}

	if ((error = bus_dmamap_create(sc->sc_dmat, RTK_RXBUFLEN + 16, 1,
	    RTK_RXBUFLEN + 16, 0, BUS_DMA_NOWAIT,
	    &sc->recv_dmamap)) != 0) {
		aprint_error_dev(self,
		    "can't create recv buffer DMA map, error = %d\n", error);
		goto fail_2;
	}

	if ((error = bus_dmamap_load(sc->sc_dmat, sc->recv_dmamap,
	    sc->rtk_rx_buf, RTK_RXBUFLEN + 16, NULL,
	    BUS_DMA_READ|BUS_DMA_NOWAIT)) != 0) {
		aprint_error_dev(self,
		    "can't load recv buffer DMA map, error = %d\n", error);
		goto fail_3;
	}

	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if ((error = bus_dmamap_create(sc->sc_dmat, MCLBYTES, 1,
		    MCLBYTES, 0, BUS_DMA_NOWAIT,
		    &txd->txd_dmamap)) != 0) {
			aprint_error_dev(self,
			    "can't create snd buffer DMA map, error = %d\n",
			    error);
			goto fail_4;
		}
		txd->txd_txaddr = RTK_TXADDR0 + (i * 4);
		txd->txd_txstat = RTK_TXSTAT0 + (i * 4);
	}
	SIMPLEQ_INIT(&sc->rtk_tx_free);
	SIMPLEQ_INIT(&sc->rtk_tx_dirty);

	/*
	 * From this point forward, the attachment cannot fail.  A failure
	 * before this releases all resources that may have been
	 * allocated.
	 */
	sc->sc_flags |= RTK_ATTACHED;

	/* Reset the adapter. */
	rtk_reset(sc);

	aprint_normal_dev(self, "Ethernet address %s\n",
	    ether_sprintf(eaddr));

	ifp = &sc->ethercom.ec_if;
	ifp->if_softc = sc;
	strcpy(ifp->if_xname, device_xname(self));
	ifp->if_flags = IFF_BROADCAST | IFF_SIMPLEX | IFF_MULTICAST;
	ifp->if_ioctl = rtk_ioctl;
	ifp->if_start = rtk_start;
	ifp->if_watchdog = rtk_watchdog;
	ifp->if_init = rtk_init;
	ifp->if_stop = rtk_stop;
	IFQ_SET_READY(&ifp->if_snd);

	/*
	 * Do ifmedia setup.
	 */
	sc->mii.mii_ifp = ifp;
	sc->mii.mii_readreg = rtk_phy_readreg;
	sc->mii.mii_writereg = rtk_phy_writereg;
	sc->mii.mii_statchg = rtk_phy_statchg;
	sc->ethercom.ec_mii = &sc->mii;
	ifmedia_init(&sc->mii.mii_media, IFM_IMASK, ether_mediachange,
	    ether_mediastatus);
	mii_attach(self, &sc->mii, 0xffffffff, MII_PHY_ANY, MII_OFFSET_ANY, 0);

	/* Choose a default media. */
	if (LIST_FIRST(&sc->mii.mii_phys) == NULL) {
		ifmedia_add(&sc->mii.mii_media, IFM_ETHER|IFM_NONE, 0, NULL);
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_NONE);
	} else {
		ifmedia_set(&sc->mii.mii_media, IFM_ETHER|IFM_AUTO);
	}

	/*
	 * Call MI attach routines.
	 */
	if_attach(ifp);
	ether_ifattach(ifp, eaddr);

	rnd_attach_source(&sc->rnd_source, device_xname(self),
	    RND_TYPE_NET, RND_FLAG_DEFAULT);

	return;

 fail_4:
	for (i = 0; i < RTK_TX_LIST_CNT; i++) {
		txd = &sc->rtk_tx_descs[i];
		if (txd->txd_dmamap != NULL)
			bus_dmamap_destroy(sc->sc_dmat, txd->txd_dmamap);
	}
 fail_3:
	bus_dmamap_destroy(sc->sc_dmat, sc->recv_dmamap);
 fail_2:
	bus_dmamem_unmap(sc->sc_dmat, sc->rtk_rx_buf, RTK_RXBUFLEN + 16);
 fail_1:
	bus_dmamem_free(sc->sc_dmat, &sc->sc_dmaseg, sc->sc_dmanseg);
 fail_0:
	return;
}