/*
 * Attach one MFI RAID logical disk ("mfid") child device.
 *
 * Pulls the logical-drive info from the child's ivars, links the softc
 * onto the parent controller's LD list (under the controller's io lock),
 * announces the volume, registers devstat accounting, creates the disk
 * device and publishes its geometry.  Always returns 0.
 */
static int
mfi_disk_attach(device_t dev)
{
	struct mfi_disk *sc;
	struct mfi_ld_info *ld_info;
	struct disk_info info;
	uint64_t sectors;
	uint32_t secsize;
	char *state;

	sc = device_get_softc(dev);
	ld_info = device_get_ivars(dev);

	/* Wire up softc back-pointers and identity of this logical drive. */
	sc->ld_dev = dev;
	sc->ld_id = ld_info->ld_config.properties.ld.v.target_id;
	sc->ld_unit = device_get_unit(dev);
	sc->ld_info = ld_info;
	sc->ld_controller = device_get_softc(device_get_parent(dev));
	sc->ld_flags = 0;

	/* Capacity in sectors; MFI always uses a fixed sector length. */
	sectors = ld_info->size;
	secsize = MFI_SECTOR_LEN;

	/* Register with the controller under its io lock. */
	lockmgr(&sc->ld_controller->mfi_io_lock, LK_EXCLUSIVE);
	TAILQ_INSERT_TAIL(&sc->ld_controller->mfi_ld_tqh, sc, ld_link);
	lockmgr(&sc->ld_controller->mfi_io_lock, LK_RELEASE);

	/* Human-readable volume state for the attach banner. */
	switch (ld_info->ld_config.params.state) {
	case MFI_LD_STATE_OFFLINE:
		state = "offline";
		break;
	case MFI_LD_STATE_PARTIALLY_DEGRADED:
		state = "partially degraded";
		break;
	case MFI_LD_STATE_DEGRADED:
		state = "degraded";
		break;
	case MFI_LD_STATE_OPTIMAL:
		state = "optimal";
		break;
	default:
		state = "unknown";
		break;
	}
	device_printf(dev, "%juMB (%ju sectors) RAID volume '%s' is %s\n",
	    sectors / (1024 * 1024 / secsize), sectors,
	    ld_info->ld_config.properties.name, state);

	devstat_add_entry(&sc->ld_devstat, "mfid", device_get_unit(dev),
	    MFI_SECTOR_LEN, DEVSTAT_NO_ORDERED_TAGS,
	    DEVSTAT_TYPE_STORARRAY | DEVSTAT_TYPE_IF_OTHER,
	    DEVSTAT_PRIORITY_ARRAY);

	sc->ld_dev_t = disk_create(sc->ld_unit, &sc->ld_disk, &mfi_disk_ops);
	sc->ld_dev_t->si_drv1 = sc;
	/* Cap I/O size by both controller max I/O and its scatter/gather depth. */
	sc->ld_dev_t->si_iosize_max =
	    min(sc->ld_controller->mfi_max_io * secsize,
	    (sc->ld_controller->mfi_max_sge - 1) * PAGE_SIZE);

	bzero(&info, sizeof(info));
	info.d_media_blksize = secsize;	/* mandatory */
	info.d_media_blocks = sectors;

	/* Synthesized CHS geometry: large volumes get 255/63, small 64/32. */
	if (info.d_media_blocks >= (1 * 1024 * 1024)) {
		info.d_nheads = 255;
		info.d_secpertrack = 63;
	} else {
		info.d_nheads = 64;
		info.d_secpertrack = 32;
	}
	disk_setdiskinfo(&sc->ld_disk, &info);
	return (0);
}
/*
 * USB HID attach.
 *
 * Sets up the softc and per-device lock, allocates the USB transfers,
 * obtains a report descriptor (using canned quirk descriptors for some
 * Wacom tablets and the Xbox 360 gamepad, otherwise fetching it from
 * the device), computes the input/output/feature report sizes, and
 * attaches the character-device fifo.
 *
 * Returns 0 on success; on any failure, undoes partial setup via
 * uhid_detach() and returns ENOMEM.
 */
static int
uhid_attach(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct uhid_softc *sc = device_get_softc(dev);
	int unit = device_get_unit(dev);
	int error = 0;

	DPRINTFN(10, "sc=%p\n", sc);

	device_set_usb_desc(dev);

	/* Recursive mutex: protects the transfers and the fifo paths. */
	mtx_init(&sc->sc_mtx, "uhid lock", NULL, MTX_DEF | MTX_RECURSE);

	sc->sc_udev = uaa->device;
	sc->sc_iface_no = uaa->info.bIfaceNum;
	sc->sc_iface_index = uaa->info.bIfaceIndex;

	error = usbd_transfer_setup(uaa->device, &uaa->info.bIfaceIndex,
	    sc->sc_xfer, uhid_config, UHID_N_TRANSFER, sc, &sc->sc_mtx);
	if (error) {
		DPRINTF("error=%s\n", usbd_errstr(error));
		goto detach;
	}
	/* Device quirks that require a static (compiled-in) descriptor. */
	if (uaa->info.idVendor == USB_VENDOR_WACOM) {
		/* the report descriptor for the Wacom Graphire is broken */
		if (uaa->info.idProduct == USB_PRODUCT_WACOM_GRAPHIRE) {
			sc->sc_repdesc_size =
			    sizeof(uhid_graphire_report_descr);
			sc->sc_repdesc_ptr =
			    (void *)&uhid_graphire_report_descr;
			sc->sc_flags |= UHID_FLAG_STATIC_DESC;
		} else if (uaa->info.idProduct ==
		    USB_PRODUCT_WACOM_GRAPHIRE3_4X5) {
			static uint8_t reportbuf[] = {2, 2, 2};
			/*
			 * The Graphire3 needs 0x0202 to be written to
			 * feature report ID 2 before it'll start
			 * returning digitizer data.
			 */
			error = usbd_req_set_report(uaa->device, NULL,
			    reportbuf, sizeof(reportbuf),
			    uaa->info.bIfaceIndex, UHID_FEATURE_REPORT, 2);
			if (error) {
				/* best-effort: proceed even if it failed */
				DPRINTF("set report failed, error=%s (ignored)\n",
				    usbd_errstr(error));
			}
			sc->sc_repdesc_size =
			    sizeof(uhid_graphire3_4x5_report_descr);
			sc->sc_repdesc_ptr =
			    (void *)&uhid_graphire3_4x5_report_descr;
			sc->sc_flags |= UHID_FLAG_STATIC_DESC;
		}
	} else if ((uaa->info.bInterfaceClass == UICLASS_VENDOR) &&
	    (uaa->info.bInterfaceSubClass == UISUBCLASS_XBOX360_CONTROLLER) &&
	    (uaa->info.bInterfaceProtocol == UIPROTO_XBOX360_GAMEPAD)) {
		/* the Xbox 360 gamepad has no report descriptor */
		sc->sc_repdesc_size = sizeof(uhid_xb360gp_report_descr);
		sc->sc_repdesc_ptr = (void *)&uhid_xb360gp_report_descr;
		sc->sc_flags |= UHID_FLAG_STATIC_DESC;
	}
	if (sc->sc_repdesc_ptr == NULL) {
		/* No quirk applied: read the descriptor from the device. */
		error = usbd_req_get_hid_desc(uaa->device, NULL,
		    &sc->sc_repdesc_ptr, &sc->sc_repdesc_size,
		    M_USBDEV, uaa->info.bIfaceIndex);
		if (error) {
			device_printf(dev, "no report descriptor\n");
			goto detach;
		}
	}
	error = usbd_req_set_idle(uaa->device, NULL,
	    uaa->info.bIfaceIndex, 0, 0);
	if (error) {
		/* Non-fatal: many devices don't implement SET_IDLE. */
		DPRINTF("set idle failed, error=%s (ignored)\n",
		    usbd_errstr(error));
	}
	/* Derive report sizes and IDs from the descriptor. */
	sc->sc_isize = hid_report_size
	    (sc->sc_repdesc_ptr, sc->sc_repdesc_size, hid_input, &sc->sc_iid);
	sc->sc_osize = hid_report_size
	    (sc->sc_repdesc_ptr, sc->sc_repdesc_size, hid_output, &sc->sc_oid);
	sc->sc_fsize = hid_report_size
	    (sc->sc_repdesc_ptr, sc->sc_repdesc_size, hid_feature, &sc->sc_fid);

	/* Clamp each report size to the driver's fixed buffer size. */
	if (sc->sc_isize > UHID_BSIZE) {
		DPRINTF("input size is too large, "
		    "%d bytes (truncating)\n", sc->sc_isize);
		sc->sc_isize = UHID_BSIZE;
	}
	if (sc->sc_osize > UHID_BSIZE) {
		DPRINTF("output size is too large, "
		    "%d bytes (truncating)\n", sc->sc_osize);
		sc->sc_osize = UHID_BSIZE;
	}
	if (sc->sc_fsize > UHID_BSIZE) {
		DPRINTF("feature size is too large, "
		    "%d bytes (truncating)\n", sc->sc_fsize);
		sc->sc_fsize = UHID_BSIZE;
	}

	error = usb_fifo_attach(uaa->device, sc, &sc->sc_mtx,
	    &uhid_fifo_methods, &sc->sc_fifo,
	    unit, -1, uaa->info.bIfaceIndex,
	    UID_ROOT, GID_OPERATOR, 0644);
	if (error) {
		goto detach;
	}
	return (0);			/* success */

detach:
	/*
	 * NOTE(review): all failure paths report ENOMEM to the caller even
	 * when the underlying USB error differs — confirm this is the
	 * intended convention for this driver.
	 */
	uhid_detach(dev);
	return (ENOMEM);
}
static int ad_strategy(struct dev_strategy_args *ap) { device_t dev = ap->a_head.a_dev->si_drv1; struct bio *bp = ap->a_bio; struct buf *bbp = bp->bio_buf; struct ata_device *atadev = device_get_softc(dev); struct ata_request *request; struct ad_softc *adp = device_get_ivars(dev); if (!(request = ata_alloc_request())) { device_printf(dev, "FAILURE - out of memory in strategy\n"); bbp->b_flags |= B_ERROR; bbp->b_error = ENOMEM; biodone(bp); return(0); } /* setup request */ request->dev = dev; request->bio = bp; request->callback = ad_done; request->timeout = ATA_DEFAULT_TIMEOUT; request->retries = 2; request->data = bbp->b_data; request->bytecount = bbp->b_bcount; /* lba is block granularity, convert byte granularity bio_offset */ request->u.ata.lba = (u_int64_t)(bp->bio_offset >> DEV_BSHIFT); request->u.ata.count = request->bytecount / DEV_BSIZE; request->transfersize = min(bbp->b_bcount, atadev->max_iosize); switch (bbp->b_cmd) { case BUF_CMD_READ: request->flags = ATA_R_READ; if (atadev->mode >= ATA_DMA) { request->u.ata.command = ATA_READ_DMA; request->flags |= ATA_R_DMA; } else if (request->transfersize > DEV_BSIZE) request->u.ata.command = ATA_READ_MUL; else request->u.ata.command = ATA_READ; break; case BUF_CMD_WRITE: request->flags = ATA_R_WRITE; if (atadev->mode >= ATA_DMA) { request->u.ata.command = ATA_WRITE_DMA; request->flags |= ATA_R_DMA; } else if (request->transfersize > DEV_BSIZE) request->u.ata.command = ATA_WRITE_MUL; else request->u.ata.command = ATA_WRITE; break; case BUF_CMD_FLUSH: request->u.ata.lba = 0; request->u.ata.count = 0; request->u.ata.feature = 0; request->bytecount = 0; request->transfersize = 0; request->flags = ATA_R_CONTROL; request->u.ata.command = ATA_FLUSHCACHE; /* ATA FLUSHCACHE requests may take up to 30 sec to timeout */ request->timeout = 30; break; default: device_printf(dev, "FAILURE - unknown BUF operation\n"); ata_free_request(request); bbp->b_flags |= B_ERROR; bbp->b_error = EIO; biodone(bp); return(0); } 
request->flags |= ATA_R_ORDERED; devstat_start_transaction(&adp->stats); ata_queue_request(request); return(0); }
/*
 * This implementation simply passes the request up to the parent
 * bus, which in our case is the pci chipset device, substituting any
 * configured values if the caller defaulted. We can get away with
 * this because there is no special mapping for ISA resources on this
 * platform. When porting this code to another architecture, it may be
 * necessary to interpose a mapping layer here.
 *
 * We manage our own interrupt resources since ISA interrupts go through
 * the ISA PIC, not the PCI interrupt controller.
 */
struct resource *
isa_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	/*
	 * Consider adding a resource definition. We allow rid 0-1 for
	 * irq and drq, 0-3 for memory and 0-7 for ports which is
	 * sufficient for isapnp.
	 */
	int passthrough = (device_get_parent(child) != bus);
	int isdefault = (start == 0UL && end == ~0UL);
	struct isa_device* idev = DEVTOISA(child);
	struct resource_list *rl = &idev->id_resources;
	struct resource_list_entry *rle;
	struct resource *res;

	/*
	 * A direct child asking for a specific range that has no entry yet:
	 * validate the rid against the per-type limits and record it.
	 */
	if (!passthrough && !isdefault) {
		rle = resource_list_find(rl, type, *rid);
		if (!rle) {
			if (*rid < 0)
				return 0;
			switch (type) {
			case SYS_RES_IRQ:
				if (*rid >= ISA_NIRQ)
					return 0;
				break;
			case SYS_RES_DRQ:
				if (*rid >= ISA_NDRQ)
					return 0;
				break;
			case SYS_RES_MEMORY:
				if (*rid >= ISA_NMEM)
					return 0;
				break;
			case SYS_RES_IOPORT:
				if (*rid >= ISA_NPORT)
					return 0;
				break;
			default:
				return 0;
			}
			resource_list_add(rl, type, *rid, start, end, count);
		}
	}

	/* Memory and ports are simply delegated to the parent bus. */
	if (type != SYS_RES_IRQ && type != SYS_RES_DRQ)
		return resource_list_alloc(rl, bus, child, type, rid,
		    start, end, count, flags);

	/* IRQ/DRQ are managed locally via our own rmans. */
	if (!passthrough) {
		/*
		 * NOTE(review): this reassigns rl from &idev->id_resources to
		 * device_get_ivars(child).  The two are only equivalent if
		 * the resource list is the first member of struct isa_device
		 * (or ivars point directly at the list) — confirm against
		 * DEVTOISA()'s definition; otherwise this is a type bug.
		 */
		rl = device_get_ivars(child);
		rle = resource_list_find(rl, type, *rid);
		if (!rle)
			return 0;
		if (rle->res)
			panic("isa_alloc_resource: resource entry is busy");
		if (isdefault) {
			/* Substitute the configured value for a wildcard. */
			start = end = rle->start;
			count = 1;
		}
	}

	if (type == SYS_RES_IRQ)
		res = rman_reserve_resource(&isa_irq_rman, start, start,
		    1, 0, child);
	else
		res = rman_reserve_resource(&isa_drq_rman, start, start,
		    1, 0, child);

	/* Record the successful reservation in the child's resource list. */
	if (res && !passthrough) {
		rle = resource_list_find(rl, type, *rid);
		rle->start = rman_get_start(res);
		rle->end = rman_get_end(res);
		rle->count = 1;
		rle->res = res;
	}
	return res;
}
/*
 * Attach a Bluetooth USB dongle: create and name its Netgraph node,
 * initialize locks/queues/task, pick the alternate setting of
 * interface #1 with the largest isochronous packet size, set up the
 * USB transfers, and claim the remaining Bluetooth interfaces.
 *
 * Returns 0 on success, ENXIO on failure (after ubt_detach cleanup).
 */
static int
ubt_attach(device_t dev)
{
	struct usb_attach_arg *uaa = device_get_ivars(dev);
	struct ubt_softc *sc = device_get_softc(dev);
	struct usb_endpoint_descriptor *ed;
	struct usb_interface_descriptor *id;
	struct usb_interface *iface;
	uint16_t wMaxPacketSize;
	uint8_t alt_index, i, j;
	uint8_t iface_index[2] = { 0, 1 };

	device_set_usb_desc(dev);

	sc->sc_dev = dev;
	sc->sc_debug = NG_UBT_WARN_LEVEL;

	/*
	 * Create Netgraph node
	 */
	if (ng_make_node_common(&typestruct, &sc->sc_node) != 0) {
		UBT_ALERT(sc, "could not create Netgraph node\n");
		return (ENXIO);
	}
	/* Name Netgraph node */
	if (ng_name_node(sc->sc_node, device_get_nameunit(dev)) != 0) {
		UBT_ALERT(sc, "could not name Netgraph node\n");
		NG_NODE_UNREF(sc->sc_node);
		return (ENXIO);
	}
	NG_NODE_SET_PRIVATE(sc->sc_node, sc);
	NG_NODE_FORCE_WRITER(sc->sc_node);

	/*
	 * Initialize device softc structure
	 */

	/* initialize locks */
	mtx_init(&sc->sc_ng_mtx, "ubt ng", NULL, MTX_DEF);
	mtx_init(&sc->sc_if_mtx, "ubt if", NULL, MTX_DEF | MTX_RECURSE);

	/* initialize packet queues */
	NG_BT_MBUFQ_INIT(&sc->sc_cmdq, UBT_DEFAULT_QLEN);
	NG_BT_MBUFQ_INIT(&sc->sc_aclq, UBT_DEFAULT_QLEN);
	NG_BT_MBUFQ_INIT(&sc->sc_scoq, UBT_DEFAULT_QLEN);

	/* initialize glue task */
	TASK_INIT(&sc->sc_task, 0, ubt_task, sc);

	/*
	 * Configure Bluetooth USB device. Discover all required USB
	 * interfaces and endpoints.
	 *
	 * USB device must present two interfaces:
	 * 1) Interface 0 that has 3 endpoints
	 *    1) Interrupt endpoint to receive HCI events
	 *    2) Bulk IN endpoint to receive ACL data
	 *    3) Bulk OUT endpoint to send ACL data
	 *
	 * 2) Interface 1 then has 2 endpoints
	 *    1) Isochronous IN endpoint to receive SCO data
	 *    2) Isochronous OUT endpoint to send SCO data
	 *
	 * Interface 1 (with isochronous endpoints) has several alternate
	 * configurations with different packet size.
	 */

	/*
	 * For interface #1 search alternate settings, and find
	 * the descriptor with the largest wMaxPacketSize
	 */
	wMaxPacketSize = 0;
	alt_index = 0;
	i = 0;
	j = 0;
	ed = NULL;

	/*
	 * Search through all the descriptors looking for the largest
	 * packet size:
	 */
	while ((ed = (struct usb_endpoint_descriptor *)usb_desc_foreach(
	    usbd_get_config_descriptor(uaa->device),
	    (struct usb_descriptor *)ed))) {
		/* Track which interface/alt-setting we are currently in. */
		if ((ed->bDescriptorType == UDESC_INTERFACE) &&
		    (ed->bLength >= sizeof(*id))) {
			id = (struct usb_interface_descriptor *)ed;
			i = id->bInterfaceNumber;
			j = id->bAlternateSetting;
		}
		/* Only endpoints belonging to interface #1 are considered. */
		if ((ed->bDescriptorType == UDESC_ENDPOINT) &&
		    (ed->bLength >= sizeof(*ed)) && (i == 1)) {
			uint16_t temp;

			temp = UGETW(ed->wMaxPacketSize);
			if (temp > wMaxPacketSize) {
				wMaxPacketSize = temp;
				alt_index = j;
			}
		}
	}

	/* Set alt configuration on interface #1 only if we found it */
	if (wMaxPacketSize > 0 &&
	    usbd_set_alt_interface_index(uaa->device, 1, alt_index)) {
		UBT_ALERT(sc, "could not set alternate setting %d " \
		    "for interface 1!\n", alt_index);
		goto detach;
	}

	/* Setup transfers for both interfaces */
	if (usbd_transfer_setup(uaa->device, iface_index, sc->sc_xfer,
	    ubt_config, UBT_N_TRANSFER, sc, &sc->sc_if_mtx)) {
		UBT_ALERT(sc, "could not allocate transfers\n");
		goto detach;
	}

	/* Claim all interfaces belonging to the Bluetooth part */
	for (i = 1;; i++) {
		iface = usbd_get_iface(uaa->device, i);
		if (iface == NULL)
			break;
		id = usbd_get_interface_descriptor(iface);
		if ((id != NULL) &&
		    (id->bInterfaceClass == UICLASS_WIRELESS) &&
		    (id->bInterfaceSubClass == UISUBCLASS_RF) &&
		    (id->bInterfaceProtocol == UIPROTO_BLUETOOTH)) {
			usbd_set_parent_iface(uaa->device, i,
			    uaa->info.bIfaceIndex);
		}
	}
	return (0);			/* success */

detach:
	ubt_detach(dev);
	return (ENXIO);
}	/* ubt_attach */
/*
 * ofw_bus method: return the child's bus-specific device information.
 * This bus stores the ofw_bus_devinfo structure directly in the
 * child's instance variables, so simply hand that pointer back.
 */
static const struct ofw_bus_devinfo *
ofw_cpulist_get_devinfo(device_t dev, device_t child)
{
	const struct ofw_bus_devinfo *dinfo;

	dinfo = device_get_ivars(child);
	return (dinfo);
}
/*
 * Bus resource allocation for children of the UniNorth bridge.
 *
 * Memory/ioport requests are clipped to the child's recorded resource
 * range and served from the softc's memory rman.  IRQ requests from
 * direct children are recorded on first use (up to 6) and delegated to
 * resource_list_alloc(); IRQ requests from grandchildren pass through
 * to our parent.  Returns NULL on any failure.
 */
static struct resource *
unin_chip_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct unin_chip_softc *sc;
	int needactivate;
	struct resource *rv;
	struct rman *rm;
	u_long adjstart, adjend, adjcount;
	struct unin_chip_devinfo *dinfo;
	struct resource_list_entry *rle;

	sc = device_get_softc(bus);
	dinfo = device_get_ivars(child);

	/* Defer activation until after the reservation succeeds. */
	needactivate = flags & RF_ACTIVE;
	flags &= ~RF_ACTIVE;

	switch (type) {
	case SYS_RES_MEMORY:
	case SYS_RES_IOPORT:
		/* IOPORT requests are looked up in the MEMORY list too. */
		rle = resource_list_find(&dinfo->udi_resources, SYS_RES_MEMORY,
		    *rid);
		if (rle == NULL) {
			device_printf(bus, "no rle for %s memory %d\n",
			    device_get_nameunit(child), *rid);
			return (NULL);
		}

		/*
		 * NOTE(review): this decrements the stored entry's end on
		 * EVERY allocation call, not just once ("Hack?" below) —
		 * repeated allocations with the same rid would keep shrinking
		 * the range.  Confirm whether this should instead be a local
		 * end-exclusive -> end-inclusive adjustment.
		 */
		rle->end = rle->end - 1; /* Hack? */

		/* Clamp the requested start/end into the recorded range. */
		if (start < rle->start)
			adjstart = rle->start;
		else if (start > rle->end)
			adjstart = rle->end;
		else
			adjstart = start;

		if (end < rle->start)
			adjend = rle->start;
		else if (end > rle->end)
			adjend = rle->end;
		else
			adjend = end;

		/*
		 * NOTE(review): with inclusive bounds a size is usually
		 * end - start + 1; verify the missing +1 here is intended.
		 */
		adjcount = adjend - adjstart;

		rm = &sc->sc_mem_rman;
		break;

	case SYS_RES_IRQ:
		/* Check for passthrough from subattachments. */
		if (device_get_parent(child) != bus)
			return BUS_ALLOC_RESOURCE(device_get_parent(bus),
			    child, type, rid, start, end, count, flags);

		rle = resource_list_find(&dinfo->udi_resources, SYS_RES_IRQ,
		    *rid);
		if (rle == NULL) {
			/* First use of this IRQ: record it (max 6). */
			if (dinfo->udi_ninterrupts >= 6) {
				device_printf(bus,
				    "%s has more than 6 interrupts\n",
				    device_get_nameunit(child));
				return (NULL);
			}
			resource_list_add(&dinfo->udi_resources, SYS_RES_IRQ,
			    dinfo->udi_ninterrupts, start, start, 1);

			dinfo->udi_interrupts[dinfo->udi_ninterrupts] = start;
			dinfo->udi_ninterrupts++;
		}

		return (resource_list_alloc(&dinfo->udi_resources, bus, child,
		    type, rid, start, end, count, flags));

	default:
		device_printf(bus, "unknown resource request from %s\n",
		    device_get_nameunit(child));
		return (NULL);
	}

	rv = rman_reserve_resource(rm, adjstart, adjend, adjcount, flags,
	    child);
	if (rv == NULL) {
		device_printf(bus,
		    "failed to reserve resource %#lx - %#lx (%#lx)"
		    " for %s\n", adjstart, adjend, adjcount,
		    device_get_nameunit(child));
		return (NULL);
	}

	rman_set_rid(rv, *rid);

	/* Activate now if the caller originally asked for RF_ACTIVE. */
	if (needactivate) {
		if (bus_activate_resource(child, type, *rid, rv) != 0) {
			device_printf(bus,
			    "failed to activate resource for %s\n",
			    device_get_nameunit(child));
			rman_release_resource(rv);
			return (NULL);
		}
	}

	return (rv);
}
/*
 * Resource allocation for FHC (FireHose Controller) children.
 *
 * SYS_RES_IRQ: only wildcard single-count requests for the built-in
 * rids (FHC_FANFAIL..FHC_TOD) are handled; the interrupt map register
 * is rewritten with the full vector (INO | IGN) and the allocation is
 * delegated upward with that vector.  SYS_RES_MEMORY: the child's
 * recorded range is translated through the ranges table into a
 * parent-bus physical address.  Anything else returns NULL.
 */
struct resource *
fhc_alloc_resource(device_t bus, device_t child, int type, int *rid,
    u_long start, u_long end, u_long count, u_int flags)
{
	struct resource_list_entry *rle;
	struct fhc_devinfo *fdi;
	struct fhc_softc *sc;
	struct resource *res;
	bus_addr_t coffset;
	bus_addr_t cend;
	bus_addr_t phys;
	int isdefault;
	uint32_t map;
	uint32_t vec;
	int i;

	isdefault = (start == 0UL && end == ~0UL);
	res = NULL;
	sc = device_get_softc(bus);
	switch (type) {
	case SYS_RES_IRQ:
		/* Only default single-IRQ requests for known rids. */
		if (!isdefault || count != 1 || *rid < FHC_FANFAIL ||
		    *rid > FHC_TOD)
			break;
		/* Program the full interrupt vector into the map register. */
		map = bus_space_read_4(sc->sc_bt[*rid], sc->sc_bh[*rid],
		    FHC_IMAP);
		vec = INTINO(map) | (sc->sc_ign << INTMAP_IGN_SHIFT);
		bus_space_write_4(sc->sc_bt[*rid], sc->sc_bh[*rid],
		    FHC_IMAP, vec);
		/* Read back to flush the write to the hardware. */
		bus_space_read_4(sc->sc_bt[*rid], sc->sc_bh[*rid], FHC_IMAP);
		res = bus_generic_alloc_resource(bus, child, type, rid,
		    vec, vec, 1, flags);
		if (res != NULL)
			rman_set_rid(res, *rid);
		break;
	case SYS_RES_MEMORY:
		fdi = device_get_ivars(child);
		rle = resource_list_find(&fdi->fdi_rl, type, *rid);
		if (rle == NULL)
			return (NULL);
		if (rle->res != NULL)
			panic("fhc_alloc_resource: resource entry is busy");
		if (isdefault) {
			/* Fill in the recorded range for wildcard requests. */
			start = rle->start;
			count = ulmax(count, rle->count);
			end = ulmax(rle->end, start + count - 1);
		}
		/* Translate the child range via the first matching range. */
		for (i = 0; i < sc->sc_nrange; i++) {
			coffset = sc->sc_ranges[i].coffset;
			cend = coffset + sc->sc_ranges[i].size - 1;
			if (start >= coffset && end <= cend) {
				start -= coffset;
				end -= coffset;
				phys = sc->sc_ranges[i].poffset |
				    ((bus_addr_t)sc->sc_ranges[i].pspace << 32);
				res = bus_generic_alloc_resource(bus, child,
				    type, rid, phys + start, phys + end,
				    count, flags);
				/* Cached even when NULL (allocation failed). */
				rle->res = res;
				break;
			}
		}
		break;
	default:
		break;
	}
	return (res);
}
/*
 * Enumerate cards on the MMC/SD bus.
 *
 * Repeatedly issues ALL_SEND_CID until it times out (no more cards).
 * For each CID, checks existing children to see whether the card is
 * already known; new cards get a freshly allocated ivars structure.
 * SD cards are fully probed (CSD, SCR, switch capabilities, SD status,
 * bus width) and the function returns after handling one; MMC cards
 * are probed (CSD, and EXT_CSD for spec >= 4.x) and the loop continues.
 *
 * NOTE(review): the SD path ends with an unconditional return, so at
 * most one SD card is discovered per call — confirm callers re-invoke
 * this until the bus is exhausted.
 */
static void
mmc_discover_cards(struct mmc_softc *sc)
{
	struct mmc_ivars *ivar = NULL;
	device_t *devlist;
	int err, i, devcount, newcard;
	uint32_t raw_cid[4];
	uint32_t resp, sec_count;
	device_t child;
	uint16_t rca = 2;	/* next relative card address for MMC cards */
	u_char switch_res[64];

	if (bootverbose || mmc_debug)
		device_printf(sc->dev, "Probing cards\n");
	while (1) {
		err = mmc_all_send_cid(sc, raw_cid);
		if (err == MMC_ERR_TIMEOUT)
			break;		/* no further cards respond */
		if (err != MMC_ERR_NONE) {
			device_printf(sc->dev, "Error reading CID %d\n", err);
			break;
		}
		/* Is this CID already represented by an existing child? */
		newcard = 1;
		if ((err = device_get_children(sc->dev, &devlist,
		    &devcount)) != 0)
			return;
		for (i = 0; i < devcount; i++) {
			ivar = device_get_ivars(devlist[i]);
			if (memcmp(ivar->raw_cid, raw_cid,
			    sizeof(raw_cid)) == 0) {
				newcard = 0;
				break;
			}
		}
		kfree(devlist, M_TEMP);
		if (bootverbose || mmc_debug) {
			device_printf(sc->dev,
			    "%sard detected (CID %08x%08x%08x%08x)\n",
			    newcard ? "New c" : "C",
			    raw_cid[0], raw_cid[1], raw_cid[2], raw_cid[3]);
		}
		if (newcard) {
			ivar = kmalloc(sizeof(struct mmc_ivars), M_DEVBUF,
			    M_WAITOK | M_ZERO);
			memcpy(ivar->raw_cid, raw_cid, sizeof(raw_cid));
		}
		/*
		 * For a known card, ivar points at the existing child's
		 * ivars and the fields below are re-initialized in place.
		 */
		if (mmcbr_get_ro(sc->dev))
			ivar->read_only = 1;
		ivar->bus_width = bus_width_1;
		ivar->timing = bus_timing_normal;
		ivar->mode = mmcbr_get_mode(sc->dev);
		if (ivar->mode == mode_sd) {
			mmc_decode_cid_sd(ivar->raw_cid, &ivar->cid);
			mmc_send_relative_addr(sc, &resp);
			ivar->rca = resp >> 16;
			/* Get card CSD. */
			mmc_send_csd(sc, ivar->rca, ivar->raw_csd);
			mmc_decode_csd_sd(ivar->raw_csd, &ivar->csd);
			ivar->sec_count = ivar->csd.capacity /
			    MMC_SECTOR_SIZE;
			/* CSD version > 1.0 indicates SDHC/SDXC. */
			if (ivar->csd.csd_structure > 0)
				ivar->high_cap = 1;
			ivar->tran_speed = ivar->csd.tran_speed;
			ivar->erase_sector = ivar->csd.erase_sector *
			    ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
			/* Get card SCR. Card must be selected to fetch it. */
			mmc_select_card(sc, ivar->rca);
			mmc_app_send_scr(sc, ivar->rca, ivar->raw_scr);
			mmc_app_decode_scr(ivar->raw_scr, &ivar->scr);
			/* Get card switch capabilities (command class 10). */
			if ((ivar->scr.sda_vsn >= 1) &&
			    (ivar->csd.ccc & (1<<10))) {
				mmc_sd_switch(sc, SD_SWITCH_MODE_CHECK,
				    SD_SWITCH_GROUP1, SD_SWITCH_NOCHANGE,
				    switch_res);
				if (switch_res[13] & 2) {
					/* High-speed mode is supported. */
					ivar->timing = bus_timing_hs;
					ivar->hs_tran_speed = SD_MAX_HS;
				}
			}
			mmc_app_sd_status(sc, ivar->rca,
			    ivar->raw_sd_status);
			mmc_app_decode_sd_status(ivar->raw_sd_status,
			    &ivar->sd_status);
			/* Prefer the allocation-unit size when reported. */
			if (ivar->sd_status.au_size != 0) {
				ivar->erase_sector =
				    16 << ivar->sd_status.au_size;
			}
			mmc_select_card(sc, 0);
			/* Find max supported bus width. */
			if ((mmcbr_get_caps(sc->dev) & MMC_CAP_4_BIT_DATA) &&
			    (ivar->scr.bus_widths & SD_SCR_BUS_WIDTH_4))
				ivar->bus_width = bus_width_4;
			if (bootverbose || mmc_debug)
				mmc_log_card(sc->dev, ivar, newcard);
			if (newcard) {
				/* Add device. */
				child = device_add_child(sc->dev, NULL, -1);
				device_set_ivars(child, ivar);
			}
			return;
		}
		/* MMC card: assign the next relative card address ourselves. */
		mmc_decode_cid_mmc(ivar->raw_cid, &ivar->cid);
		ivar->rca = rca++;
		mmc_set_relative_addr(sc, ivar->rca);
		/* Get card CSD. */
		mmc_send_csd(sc, ivar->rca, ivar->raw_csd);
		mmc_decode_csd_mmc(ivar->raw_csd, &ivar->csd);
		ivar->sec_count = ivar->csd.capacity / MMC_SECTOR_SIZE;
		ivar->tran_speed = ivar->csd.tran_speed;
		ivar->erase_sector = ivar->csd.erase_sector *
		    ivar->csd.write_bl_len / MMC_SECTOR_SIZE;
		/* Only MMC >= 4.x cards support EXT_CSD. */
		if (ivar->csd.spec_vers >= 4) {
			/* Card must be selected to fetch EXT_CSD. */
			mmc_select_card(sc, ivar->rca);
			mmc_send_ext_csd(sc, ivar->raw_ext_csd);
			/* Handle extended capacity from EXT_CSD */
			sec_count = ivar->raw_ext_csd[EXT_CSD_SEC_CNT] +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 1] << 8) +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 2] << 16) +
			    (ivar->raw_ext_csd[EXT_CSD_SEC_CNT + 3] << 24);
			if (sec_count != 0) {
				ivar->sec_count = sec_count;
				ivar->high_cap = 1;
			}
			/* Get card speed in high speed mode. */
			ivar->timing = bus_timing_hs;
			if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE] &
			    EXT_CSD_CARD_TYPE_52)
				ivar->hs_tran_speed = MMC_TYPE_52_MAX_HS;
			else if (ivar->raw_ext_csd[EXT_CSD_CARD_TYPE] &
			    EXT_CSD_CARD_TYPE_26)
				ivar->hs_tran_speed = MMC_TYPE_26_MAX_HS;
			else
				ivar->hs_tran_speed = ivar->tran_speed;
			/* Find max supported bus width. */
			ivar->bus_width = mmc_test_bus_width(sc);
			mmc_select_card(sc, 0);
			/* Handle HC erase sector size. */
			if (ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE] != 0) {
				ivar->erase_sector = 1024 *
				    ivar->raw_ext_csd[EXT_CSD_ERASE_GRP_SIZE];
				mmc_switch(sc, EXT_CSD_CMD_SET_NORMAL,
				    EXT_CSD_ERASE_GRP_DEF, 1);
			}
		} else {
			ivar->bus_width = bus_width_1;
			ivar->timing = bus_timing_normal;
		}
		if (bootverbose || mmc_debug)
			mmc_log_card(sc->dev, ivar, newcard);
		if (newcard) {
			/* Add device. */
			child = device_add_child(sc->dev, NULL, -1);
			device_set_ivars(child, ivar);
		}
	}
}
/* Attach the PHY to the MII bus */
/*
 * Broadcom gigabit PHY attach: register with the parent MII bus,
 * identify the PHY by OUI/model (handling SerDes/fiber variants of
 * the 5706 and 5708S), detect the 10/100-only 590x controllers via
 * the owning MAC driver, reset the PHY, read its capabilities and
 * build the supported media list.
 */
static int
brgphy_attach(device_t dev)
{
	struct brgphy_softc *bsc;
	struct bge_softc *bge_sc = NULL;
	struct bce_softc *bce_sc = NULL;
	struct mii_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;
	struct ifnet *ifp;
	int fast_ether;

	bsc = device_get_softc(dev);
	sc = &bsc->mii_sc;
	ma = device_get_ivars(dev);
	sc->mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);

	/* Initialize mii_softc structure */
	sc->mii_inst = mii->mii_instance;
	sc->mii_phy = ma->mii_phyno;
	sc->mii_service = brgphy_service;
	sc->mii_pdata = mii;
	sc->mii_anegticks = MII_ANEGTICKS_GIGE;
	sc->mii_flags |= MIIF_NOISOLATE | MIIF_NOLOOP;
	mii->mii_instance++;

	/* Initialize brgphy_softc structure */
	bsc->mii_oui = MII_OUI(ma->mii_id1, ma->mii_id2);
	bsc->mii_model = MII_MODEL(ma->mii_id2);
	bsc->mii_rev = MII_REV(ma->mii_id2);
	bsc->serdes_flags = 0;

	fast_ether = 0;

	if (bootverbose)
		device_printf(dev, "OUI 0x%06x, model 0x%04x, rev. %d\n",
		    bsc->mii_oui, bsc->mii_model, bsc->mii_rev);

	/* Handle any special cases based on the PHY ID */
	switch (bsc->mii_oui) {
	case MII_OUI_BROADCOM:
	case MII_OUI_BROADCOM2:
		break;
	case MII_OUI_xxBROADCOM:
		switch (bsc->mii_model) {
		case MII_MODEL_xxBROADCOM_BCM5706:
			/*
			 * The 5464 PHY used in the 5706 supports both copper
			 * and fiber interfaces over GMII. Need to check the
			 * shadow registers to see which mode is actually
			 * in effect, and therefore whether we have 5706C or
			 * 5706S.
			 */
			PHY_WRITE(sc, BRGPHY_MII_SHADOW_1C,
			    BRGPHY_SHADOW_1C_MODE_CTRL);
			if (PHY_READ(sc, BRGPHY_MII_SHADOW_1C) &
			    BRGPHY_SHADOW_1C_ENA_1000X) {
				bsc->serdes_flags |= BRGPHY_5706S;
				sc->mii_flags |= MIIF_HAVEFIBER;
			}
			break;
		}
		break;
	case MII_OUI_xxBROADCOM_ALT1:
		switch (bsc->mii_model) {
		case MII_MODEL_xxBROADCOM_ALT1_BCM5708S:
			bsc->serdes_flags |= BRGPHY_5708S;
			sc->mii_flags |= MIIF_HAVEFIBER;
			break;
		}
		break;
	default:
		device_printf(dev, "Unrecognized OUI for PHY!\n");
	}

	ifp = sc->mii_pdata->mii_ifp;

	/* Find the MAC driver associated with this PHY. */
	if (strcmp(ifp->if_dname, "bge") == 0) {
		bge_sc = ifp->if_softc;
	} else if (strcmp(ifp->if_dname, "bce") == 0) {
		bce_sc = ifp->if_softc;
	}

	/* Todo: Need to add additional controllers such as 5906 & 5787F */
	/* The 590x chips are 10/100 only. */
	if (bge_sc &&
	    pci_get_vendor(bge_sc->bge_dev) == BCOM_VENDORID &&
	    (pci_get_device(bge_sc->bge_dev) == BCOM_DEVICEID_BCM5901 ||
	    pci_get_device(bge_sc->bge_dev) == BCOM_DEVICEID_BCM5901A2 ||
	    pci_get_device(bge_sc->bge_dev) == BCOM_DEVICEID_BCM5906 ||
	    pci_get_device(bge_sc->bge_dev) == BCOM_DEVICEID_BCM5906M)) {
		fast_ether = 1;
		sc->mii_anegticks = MII_ANEGTICKS;
	}

	brgphy_reset(sc);

	/* Read the PHY's capabilities. */
	sc->mii_capabilities = PHY_READ(sc, MII_BMSR) & ma->mii_capmask;
	if (sc->mii_capabilities & BMSR_EXTSTAT)
		sc->mii_extcapabilities = PHY_READ(sc, MII_EXTSR);
	device_printf(dev, " ");

#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)

	/* Create an instance of Ethernet media. */
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst), BMCR_ISO);

	/* Add the supported media types */
	if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) {
		/* Copper: 10/100 always, gigabit unless a 590x chip. */
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
		    BRGPHY_S10);
		printf("10baseT, ");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
		    BRGPHY_S10 | BRGPHY_BMCR_FDX);
		printf("10baseT-FDX, ");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
		    BRGPHY_S100);
		printf("100baseTX, ");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
		    BRGPHY_S100 | BRGPHY_BMCR_FDX);
		printf("100baseTX-FDX, ");
		if (fast_ether == 0) {
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0,
			    sc->mii_inst), BRGPHY_S1000);
			printf("1000baseT, ");
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX,
			    sc->mii_inst), BRGPHY_S1000 | BRGPHY_BMCR_FDX);
			printf("1000baseT-FDX, ");
		}
	} else {
		/* Fiber/SerDes: gigabit full-duplex only. */
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX,
		    sc->mii_inst), BRGPHY_S1000 | BRGPHY_BMCR_FDX);
		printf("1000baseSX-FDX, ");
		/* 2.5G support is a software enabled feature on the 5708S and 5709S. */
		if (bce_sc &&
		    (bce_sc->bce_phy_flags & BCE_PHY_2_5G_CAPABLE_FLAG)) {
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_2500_SX, IFM_FDX,
			    sc->mii_inst), 0);
			printf("2500baseSX-FDX, ");
		}
	}

	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0);
	printf("auto\n");

#undef ADD
	MIIBUS_MEDIAINIT(sc->mii_dev);
	return (0);
}
/*
 * ThunderLAN internal 10baseT PHY attach: register with the parent MII
 * bus, mask off capabilities already provided by a sibling PHY, reset,
 * and advertise the 10baseT/10base2/10base5 media this PHY supports.
 */
static int
tlphy_attach(device_t dev)
{
	struct tlphy_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;
	const char *sep = "";
	int capmask = 0xFFFFFFFF;

	sc = device_get_softc(dev);
	ma = device_get_ivars(dev);
	mii_softc_init(&sc->sc_mii, ma);
	sc->sc_mii.mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->sc_mii.mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, &sc->sc_mii, mii_list);

	sc->sc_mii.mii_inst = mii->mii_instance;
	sc->sc_mii.mii_service = tlphy_service;
	sc->sc_mii.mii_reset = mii_phy_reset;
	sc->sc_mii.mii_pdata = mii;
	if (mii->mii_instance) {
		struct mii_softc *other;
		device_t *devlist;
		int devs, i;

		/*
		 * Another PHY is already attached: remove from our capmask
		 * whatever the first sibling already covers.
		 * NOTE(review): the strcmp() is truthy when the sibling is
		 * NOT named "tlphy", so this masks against the first
		 * non-tlphy sibling — confirm that polarity is intended.
		 */
		device_get_children(sc->sc_mii.mii_dev, &devlist, &devs);
		for (i = 0; i < devs; i++) {
			if (strcmp(device_get_name(devlist[i]), "tlphy")) {
				other = device_get_softc(devlist[i]);
				capmask &= ~other->mii_capabilities;
				break;
			}
		}
		kfree(devlist, M_TEMP);
	}
	mii->mii_instance++;

	/* Allow isolation just for the duration of the reset. */
	sc->sc_mii.mii_flags &= ~MIIF_NOISOLATE;
	mii_phy_reset(&sc->sc_mii);
	sc->sc_mii.mii_flags |= MIIF_NOISOLATE;

	/*
	 * Note that if we're on a device that also supports 100baseTX,
	 * we are not going to want to use the built-in 10baseT port,
	 * since there will be another PHY on the MII wired up to the
	 * UTP connector. The parent indicates this to us by specifying
	 * the TLPHY_MEDIA_NO_10_T bit.
	 */
	sc->sc_mii.mii_capabilities =
	    PHY_READ(&sc->sc_mii, MII_BMSR) & capmask /*ma->mii_capmask*/;

#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_LOOP, sc->sc_mii.mii_inst),
	    MII_MEDIA_10_T);
/* Print a media name with a comma separator after the first one. */
#define PRINT(s) kprintf("%s%s", sep, s); sep = ", "

	device_printf(dev, " ");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_2, 0, sc->sc_mii.mii_inst), 0);
	PRINT("10base2/BNC");
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_5, 0, sc->sc_mii.mii_inst), 0);
	PRINT("10base5/AUI");

	if (sc->sc_mii.mii_capabilities & BMSR_MEDIAMASK) {
		kprintf("%s", sep);
		mii_phy_add_media(&sc->sc_mii);
	} else {
		/* Nothing usable left after masking: advertise "none". */
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->sc_mii.mii_inst),
		    MII_MEDIA_NONE);
	}
	kprintf("\n");
#undef ADD
#undef PRINT

	MIIBUS_MEDIAINIT(sc->sc_mii.mii_dev);
	return(0);
}
/*
 * Marvell 88E1000-family PHY attach: register with the parent MII bus,
 * detect fiber link (E1011/E1112) and the 10/100-only E3082, reset the
 * PHY and build the supported media list.
 */
static int
e1000phy_attach(device_t dev)
{
	struct e1000phy_softc *esc;
	struct mii_softc *sc;
	struct mii_attach_args *ma;
	struct mii_data *mii;
	int fast_ether;

	esc = device_get_softc(dev);
	sc = &esc->mii_sc;
	ma = device_get_ivars(dev);
	sc->mii_dev = device_get_parent(dev);
	mii = device_get_softc(sc->mii_dev);
	LIST_INSERT_HEAD(&mii->mii_phys, sc, mii_list);

	sc->mii_inst = mii->mii_instance;
	sc->mii_phy = ma->mii_phyno;
	sc->mii_service = e1000phy_service;
	sc->mii_pdata = mii;
	sc->mii_anegticks = MII_ANEGTICKS_GIGE;
	mii->mii_instance++;

	fast_ether = 0;
	esc->mii_model = MII_MODEL(ma->mii_id2);
	switch (esc->mii_model) {
	case MII_MODEL_MARVELL_E1011:
	case MII_MODEL_MARVELL_E1112:
		/* These parts report fiber operation in the ESSR register. */
		if (PHY_READ(sc, E1000_ESSR) & E1000_ESSR_FIBER_LINK)
			sc->mii_flags |= MIIF_HAVEFIBER;
		break;
	case MII_MODEL_MARVELL_E3082:
		/* 88E3082 10/100 Fast Ethernet PHY. */
		sc->mii_anegticks = MII_ANEGTICKS;
		fast_ether = 1;
		break;
	}

	e1000phy_reset(sc);

	device_printf(dev, " ");

#define ADD(m, c) ifmedia_add(&mii->mii_media, (m), (c), NULL)
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_NONE, 0, sc->mii_inst),
	    E1000_CR_ISOLATE);
	if ((sc->mii_flags & MIIF_HAVEFIBER) == 0) {
		/* Copper media. */
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, 0, sc->mii_inst),
		    E1000_CR_SPEED_10);
		printf("10baseT, ");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_10_T, IFM_FDX, sc->mii_inst),
		    E1000_CR_SPEED_10 | E1000_CR_FULL_DUPLEX);
		printf("10baseT-FDX, ");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, 0, sc->mii_inst),
		    E1000_CR_SPEED_100);
		printf("100baseTX, ");
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_100_TX, IFM_FDX, sc->mii_inst),
		    E1000_CR_SPEED_100 | E1000_CR_FULL_DUPLEX);
		printf("100baseTX-FDX, ");
		if (fast_ether == 0) {
			/*
			 * 1000BT-simplex not supported; driver must ignore
			 * this entry, but it must be present in order to
			 * manually set full-duplex.
			 */
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, 0,
			    sc->mii_inst), E1000_CR_SPEED_1000);
			ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_T, IFM_FDX,
			    sc->mii_inst),
			    E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
			/*
			 * NOTE(review): label says "TX" but the media word
			 * is IFM_1000_T (baseT) — cosmetic only, left as-is.
			 */
			printf("1000baseTX-FDX, ");
		}
	} else {
		/* Fiber media: gigabit full-duplex only. */
		ADD(IFM_MAKEWORD(IFM_ETHER, IFM_1000_SX, IFM_FDX,
		    sc->mii_inst),
		    E1000_CR_SPEED_1000 | E1000_CR_FULL_DUPLEX);
		printf("1000baseSX-FDX, ");
	}
	ADD(IFM_MAKEWORD(IFM_ETHER, IFM_AUTO, 0, sc->mii_inst), 0);
	printf("auto\n");
#undef ADD

	MIIBUS_MEDIAINIT(sc->mii_dev);
	return (0);
}
/*
 * Attach the USB audio gadget: initialize state, select the alternate
 * settings on the two streaming interfaces, set up the isochronous
 * transfers, and start streaming.
 *
 * Returns 0 on success or ENXIO after tearing down via g_audio_detach().
 */
static int
g_audio_attach(device_t dev)
{
    struct g_audio_softc *sc = device_get_softc(dev);
    struct usb_attach_arg *uaa = device_get_ivars(dev);
    int error;
    int i;
    uint8_t iface_index[3];

    DPRINTFN(11, "\n");

    device_set_usb_desc(dev);

    mtx_init(&sc->sc_mtx, "g_audio", NULL, MTX_DEF);

    usb_callout_init_mtx(&sc->sc_callout, &sc->sc_mtx, 0);
    usb_callout_init_mtx(&sc->sc_watchdog, &sc->sc_mtx, 0);

    sc->sc_mode = G_AUDIO_MODE_SILENT;
    sc->sc_noise_rem = 1;

    /* Pre-fill both double-buffer halves with equal per-frame lengths. */
    for (i = 0; i != G_AUDIO_FRAMES; i++) {
        sc->sc_data_len[0][i] = G_AUDIO_BUFSIZE / G_AUDIO_FRAMES;
        sc->sc_data_len[1][i] = G_AUDIO_BUFSIZE / G_AUDIO_FRAMES;
    }

    /* Control interface plus the two streaming interfaces that follow it. */
    iface_index[0] = uaa->info.bIfaceIndex;
    iface_index[1] = uaa->info.bIfaceIndex + 1;
    iface_index[2] = uaa->info.bIfaceIndex + 2;

    /* Alternate setting 1 carries the isochronous endpoints. */
    error = usbd_set_alt_interface_index(uaa->device, iface_index[1], 1);
    if (error) {
        DPRINTF("alt iface setting error=%s\n", usbd_errstr(error));
        goto detach;
    }
    error = usbd_set_alt_interface_index(uaa->device, iface_index[2], 1);
    if (error) {
        DPRINTF("alt iface setting error=%s\n", usbd_errstr(error));
        goto detach;
    }
    error = usbd_transfer_setup(uaa->device, iface_index, sc->sc_xfer,
        g_audio_config, G_AUDIO_N_TRANSFER, sc, &sc->sc_mtx);
    if (error) {
        DPRINTF("error=%s\n", usbd_errstr(error));
        goto detach;
    }
    /* Tie the streaming interfaces to the control interface. */
    usbd_set_parent_iface(uaa->device, iface_index[1], iface_index[0]);
    usbd_set_parent_iface(uaa->device, iface_index[2], iface_index[0]);

    /* Start all four isoc transfers and arm the timers under the lock. */
    mtx_lock(&sc->sc_mtx);
    usbd_transfer_start(sc->sc_xfer[G_AUDIO_ISOC0_RD]);
    usbd_transfer_start(sc->sc_xfer[G_AUDIO_ISOC1_RD]);
    usbd_transfer_start(sc->sc_xfer[G_AUDIO_ISOC0_WR]);
    usbd_transfer_start(sc->sc_xfer[G_AUDIO_ISOC1_WR]);
    g_audio_timeout_reset(sc);
    g_audio_watchdog_reset(sc);
    mtx_unlock(&sc->sc_mtx);

    return (0);         /* success */

detach:
    g_audio_detach(dev);
    return (ENXIO);     /* error */
}
/**
 * \brief Verify the existence of attached device instances and perform
 *        probe/attach processing for newly arrived devices.
 *
 * \param dev  The NewBus device representing this XenBus bus.
 *
 * \return  On success, 0. Otherwise an errno value indicating the
 *          type of failure.
 */
static int
xenbusb_probe_children(device_t dev)
{
    device_t *kids;
    struct xenbus_device_ivars *ivars;
    int i, count, error;

    if (device_get_children(dev, &kids, &count) == 0) {
        for (i = 0; i < count; i++) {
            if (device_get_state(kids[i]) != DS_NOTPRESENT) {
                /*
                 * We already know about this one.
                 * Make sure it's still here.
                 */
                xenbusb_verify_device(dev, kids[i]);
                continue;
            }

            error = device_probe_and_attach(kids[i]);
            if (error == ENXIO) {
                struct xenbusb_softc *xbs;

                /*
                 * We don't have a PV driver for this device.
                 * However, an emulated device we do support
                 * may share this backend.  Hide the node from
                 * XenBus until the next rescan, but leave its
                 * state unchanged so we don't inadvertently
                 * prevent attachment of any emulated device.
                 */
                xenbusb_delete_child(dev, kids[i]);

                /*
                 * Since the XenStore state of this device
                 * still indicates a pending attach, manually
                 * release its hold on the boot process.
                 */
                xbs = device_get_softc(dev);
                xenbusb_release_confighook(xbs);
                continue;
            } else if (error) {
                /*
                 * Transition device to the closed state
                 * so the world knows that attachment will
                 * not occur.
                 */
                xenbus_set_state(kids[i], XenbusStateClosed);

                /*
                 * Remove our record of this device.
                 * So long as it remains in the closed
                 * state in the XenStore, we will not find
                 * it again.  The state will only change
                 * if the control domain actively reconfigures
                 * this device.
                 */
                xenbusb_delete_child(dev, kids[i]);
                continue;
            }

            /*
             * Augment default newbus provided dynamic sysctl
             * variables with the standard ivar contents of
             * XenBus devices.
             */
            xenbusb_device_sysctl_init(kids[i]);

            /*
             * Now that we have a driver managing this device
             * that can receive otherend state change events,
             * hook up a watch for them.
             */
            ivars = device_get_ivars(kids[i]);
            xs_register_watch(&ivars->xd_otherend_watch);
            xs_register_watch(&ivars->xd_local_watch);
        }
        free(kids, M_TEMP);
    }

    return (0);
}
/*
 * Determine the geometry and media size of an ATAPI floppy/removable
 * drive.  Tries, in order: a quirk for the IOMEGA Clik!, READ CAPACITY,
 * READ CAPACITY(16), and finally the rewriteable capabilities mode page
 * (retried, since some drives need repeated requests).
 *
 * Returns 0 on success (fdp->heads/sectors/sectorsize/mediasize filled
 * in), 1 on failure.
 */
static int
afd_sense(device_t dev)
{
    struct ata_device *atadev = device_get_softc(dev);
    struct afd_softc *fdp = device_get_ivars(dev);
    struct afd_capacity capacity;
    struct afd_capacity_big capacity_big;
    struct afd_capabilities capabilities;
    /* ATAPI CCBs: byte layouts are dictated by the command set. */
    int8_t ccb1[16] = { ATAPI_READ_CAPACITY, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, 0, 0, 0, 0, 0 };
    int8_t ccb2[16] = { ATAPI_READ_CAPACITY_16, 0x10, 0, 0, 0, 0, 0, 0, 0, 0,
                        0, 0, 0, sizeof(struct afd_capacity_big) & 0xff, 0, 0 };
    int8_t ccb3[16] = { ATAPI_MODE_SENSE_BIG, 0, ATAPI_REWRITEABLE_CAP_PAGE,
                        0, 0, 0, 0, sizeof(struct afd_capabilities) >> 8,
                        sizeof(struct afd_capabilities) & 0xff,
                        0, 0, 0, 0, 0, 0, 0 };
    int timeout = 20;
    int error, count;

    fdp->mediasize = 0;

    /* wait for device to get ready */
    while ((error = afd_test_ready(dev)) && timeout--) {
        DELAY(100000);
    }
    if (error == EBUSY)
        return 1;

    /* The IOMEGA Clik! doesn't support reading the cap page, fake it */
    if (!strncmp(atadev->param.model, "IOMEGA Clik!", 12)) {
        fdp->heads = 1;
        fdp->sectors = 2;
        fdp->mediasize = 39441 * 1024;
        fdp->sectorsize = 512;
        afd_test_ready(dev);
        return 0;
    }

    /* get drive capacity */
    if (!ata_atapicmd(dev, ccb1, (caddr_t)&capacity,
                      sizeof(struct afd_capacity), ATA_R_READ, 30)) {
        /* Geometry is synthesized; only size fields come from the drive. */
        fdp->heads = 16;
        fdp->sectors = 63;
        fdp->sectorsize = be32toh(capacity.blocksize);
        fdp->mediasize =
            (u_int64_t)be32toh(capacity.capacity) * fdp->sectorsize;
        afd_test_ready(dev);
        return 0;
    }

    /* get drive capacity big */
    if (!ata_atapicmd(dev, ccb2, (caddr_t)&capacity_big,
                      sizeof(struct afd_capacity_big),
                      ATA_R_READ | ATA_R_QUIET, 30)) {
        fdp->heads = 16;
        fdp->sectors = 63;
        fdp->sectorsize = be32toh(capacity_big.blocksize);
        fdp->mediasize = be64toh(capacity_big.capacity) * fdp->sectorsize;
        afd_test_ready(dev);
        return 0;
    }

    /* get drive capabilities, some bugridden drives needs this repeated */
    for (count = 0; count < 5; count++) {
        if (!ata_atapicmd(dev, ccb3, (caddr_t)&capabilities,
                          sizeof(struct afd_capabilities), ATA_R_READ, 30) &&
            capabilities.page_code == ATAPI_REWRITEABLE_CAP_PAGE) {
            fdp->heads = capabilities.heads;
            fdp->sectors = capabilities.sectors;
            fdp->sectorsize = be16toh(capabilities.sector_size);
            fdp->mediasize = be16toh(capabilities.cylinders) *
                fdp->heads * fdp->sectors * fdp->sectorsize;
            /* A zero medium type means no media is present. */
            if (!capabilities.medium_type)
                fdp->mediasize = 0;
            return 0;
        }
    }
    return 1;
}

/*
 * Lock (lock != 0) or unlock the drive door via PREVENT ALLOW MEDIUM
 * REMOVAL.  The IOMEGA Clik! does not support the command, so it is
 * silently skipped for that model.
 */
static int
afd_prevent_allow(device_t dev, int lock)
{
    struct ata_device *atadev = device_get_softc(dev);
    int8_t ccb[16] = { ATAPI_PREVENT_ALLOW, 0, 0, 0, lock,
                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

    if (!strncmp(atadev->param.model, "IOMEGA Clik!", 12))
        return 0;
    return ata_atapicmd(dev, ccb, NULL, 0, 0, 30);
}

/*
 * Issue TEST UNIT READY; returns the ata_atapicmd() result (0 when the
 * device is ready).
 */
static int
afd_test_ready(device_t dev)
{
    int8_t ccb[16] = { ATAPI_TEST_UNIT_READY, 0, 0, 0, 0,
                       0, 0, 0, 0, 0, 0, 0, 0, 0, 0, 0 };

    return ata_atapicmd(dev, ccb, NULL, 0, 0, 30);
}
/*
 * ofw_bus method: the Open Firmware device info for a child was stored
 * in its instance variables at creation time, so simply hand that
 * pointer back to the caller.
 */
static const struct ofw_bus_devinfo *
smu_get_devinfo(device_t bus, device_t dev)
{
    const struct ofw_bus_devinfo *dinfo;

    dinfo = device_get_ivars(dev);
    return (dinfo);
}
/*
 * Attach a CDC Ethernet device: locate the data interface via the union
 * descriptor, find a usable alternate setting (one that has both IN and
 * OUT endpoints), obtain or fabricate a MAC address, and attach the
 * generic USB ethernet layer.
 *
 * Returns 0 on success or ENXIO after tearing down via cdce_detach().
 */
static int
cdce_attach(device_t dev)
{
    struct cdce_softc *sc = device_get_softc(dev);
    struct usb_ether *ue = &sc->sc_ue;
    struct usb_attach_arg *uaa = device_get_ivars(dev);
    struct usb_interface *iface;
    const struct usb_cdc_union_descriptor *ud;
    const struct usb_interface_descriptor *id;
    const struct usb_cdc_ethernet_descriptor *ued;
    const struct usb_config *pcfg;
    uint32_t seed;
    int error;
    uint8_t i;
    uint8_t data_iface_no;
    char eaddr_str[5 * ETHER_ADDR_LEN];     /* approx */

    sc->sc_flags = USB_GET_DRIVER_INFO(uaa);
    sc->sc_ue.ue_udev = uaa->device;

    device_set_usb_desc(dev);

    mtx_init(&sc->sc_mtx, device_get_nameunit(dev), NULL, MTX_DEF);

    /* Look for the CDC union descriptor naming the data interface. */
    ud = usbd_find_descriptor
        (uaa->device, NULL, uaa->info.bIfaceIndex,
        UDESC_CS_INTERFACE, 0xFF, UDESCSUB_CDC_UNION, 0xFF);

    if ((ud == NULL) || (ud->bLength < sizeof(*ud)) ||
        (sc->sc_flags & CDCE_FLAG_NO_UNION)) {
        DPRINTFN(1, "No union descriptor!\n");
        /* Fall back: control and data share the probed interface. */
        sc->sc_ifaces_index[0] = uaa->info.bIfaceIndex;
        sc->sc_ifaces_index[1] = uaa->info.bIfaceIndex;
        goto alloc_transfers;
    }
    data_iface_no = ud->bSlaveInterface[0];

    /* Walk all interfaces looking for the advertised data interface. */
    for (i = 0;; i++) {
        iface = usbd_get_iface(uaa->device, i);

        if (iface) {
            id = usbd_get_interface_descriptor(iface);

            if (id && (id->bInterfaceNumber == data_iface_no)) {
                sc->sc_ifaces_index[0] = i;
                sc->sc_ifaces_index[1] = uaa->info.bIfaceIndex;
                usbd_set_parent_iface(uaa->device, i,
                    uaa->info.bIfaceIndex);
                break;
            }
        } else {
            device_printf(dev, "no data interface found\n");
            goto detach;
        }
    }

    /*
     * <quote>
     *
     * The Data Class interface of a networking device shall have
     * a minimum of two interface settings. The first setting
     * (the default interface setting) includes no endpoints and
     * therefore no networking traffic is exchanged whenever the
     * default interface setting is selected. One or more
     * additional interface settings are used for normal
     * operation, and therefore each includes a pair of endpoints
     * (one IN, and one OUT) to exchange network traffic. Select
     * an alternate interface setting to initialize the network
     * aspects of the device and to enable the exchange of
     * network traffic.
     *
     * </quote>
     *
     * Some devices, most notably cable modems, include interface
     * settings that have no IN or OUT endpoint, therefore loop
     * through the list of all available interface settings
     * looking for one with both IN and OUT endpoints.
     */

alloc_transfers:

    pcfg = cdce_config;     /* Default Configuration */

    for (i = 0; i != 32; i++) {
        error = usbd_set_alt_interface_index(uaa->device,
            sc->sc_ifaces_index[0], i);
        if (error)
            break;
#if CDCE_HAVE_NCM
        /* Alternate setting 0 may be an NCM configuration. */
        if ((i == 0) && (cdce_ncm_init(sc) == 0))
            pcfg = cdce_ncm_config;
#endif
        error = usbd_transfer_setup(uaa->device,
            sc->sc_ifaces_index, sc->sc_xfer,
            pcfg, CDCE_N_TRANSFER, sc, &sc->sc_mtx);

        if (error == 0)
            break;
    }

    if (error || (i == 32)) {
        device_printf(dev, "No valid alternate "
            "setting found\n");
        goto detach;
    }

    /* The MAC address is published as a unicode string descriptor. */
    ued = usbd_find_descriptor
        (uaa->device, NULL, uaa->info.bIfaceIndex,
        UDESC_CS_INTERFACE, 0xFF, UDESCSUB_CDC_ENF, 0xFF);

    if ((ued == NULL) || (ued->bLength < sizeof(*ued))) {
        error = USB_ERR_INVAL;
    } else {
        error = usbd_req_get_string_any(uaa->device, NULL,
            eaddr_str, sizeof(eaddr_str), ued->iMacAddress);
    }

    if (error) {
        /* fake MAC address */

        device_printf(dev, "faking MAC address\n");
        seed = ticks;
        /* 0x2a sets the locally-administered bit. */
        sc->sc_ue.ue_eaddr[0] = 0x2a;
        memcpy(&sc->sc_ue.ue_eaddr[1], &seed, sizeof(uint32_t));
        sc->sc_ue.ue_eaddr[5] = device_get_unit(dev);
    } else {
        /* Parse the 12 hex digits into 6 bytes. */
        memset(sc->sc_ue.ue_eaddr, 0, sizeof(sc->sc_ue.ue_eaddr));

        for (i = 0; i != (ETHER_ADDR_LEN * 2); i++) {
            char c = eaddr_str[i];

            if ('0' <= c && c <= '9')
                c -= '0';
            else if (c != 0)
                c -= 'A' - 10;
            else
                break;

            c &= 0xf;

            /* High nibble first. */
            if ((i & 1) == 0)
                c <<= 4;
            sc->sc_ue.ue_eaddr[i / 2] |= c;
        }

        if (uaa->usb_mode == USB_MODE_DEVICE) {
            /*
             * Do not use the same MAC address like the peer !
             */
            sc->sc_ue.ue_eaddr[5] ^= 0xFF;
        }
    }

    ue->ue_sc = sc;
    ue->ue_dev = dev;
    ue->ue_udev = uaa->device;
    ue->ue_mtx = &sc->sc_mtx;
    ue->ue_methods = &cdce_ue_methods;

    error = uether_ifattach(ue);
    if (error) {
        device_printf(dev, "could not attach interface\n");
        goto detach;
    }
    return (0);         /* success */

detach:
    cdce_detach(dev);
    return (ENXIO);     /* failure */
}
/*
 * Attach a USB Bluetooth dongle: configure the device, locate the
 * required endpoints on interface 0 (HCI event interrupt, ACL bulk
 * in/out), prepare the isochronous SCO interface 1, attach the HCI
 * layer, and publish per-device sysctls.
 *
 * Returns 0 on success, ENXIO on any fatal setup failure (sysctl
 * creation failure is non-fatal).
 */
static int
ubt_attach(device_t self)
{
    struct ubt_softc *sc = device_get_softc(self);
    struct usb_attach_arg *uaa = device_get_ivars(self);
    usb_config_descriptor_t *cd;
    usb_endpoint_descriptor_t *ed;
    int err;
    uint8_t count, i;

    DPRINTFN(50, "ubt_attach: sc=%p\n", sc);

    sc->sc_udev = uaa->device;
    sc->sc_dev = self;

    /*
     * Move the device into the configured state
     */
    err = usbd_set_config_index(sc->sc_udev, 0, 1);
    if (err) {
        kprintf("%s: failed to set configuration idx 0: %s\n",
            device_get_nameunit(sc->sc_dev), usbd_errstr(err));
        return ENXIO;
    }

    /*
     * Interface 0 must have 3 endpoints
     *    1) Interrupt endpoint to receive HCI events
     *    2) Bulk IN endpoint to receive ACL data
     *    3) Bulk OUT endpoint to send ACL data
     */
    err = usbd_device2interface_handle(sc->sc_udev, 0, &sc->sc_iface0);
    if (err) {
        kprintf("%s: Could not get interface 0 handle %s (%d)\n",
            device_get_nameunit(sc->sc_dev), usbd_errstr(err), err);
        return ENXIO;
    }

    /* -1 marks "not yet found" for each required endpoint address. */
    sc->sc_evt_addr = -1;
    sc->sc_aclrd_addr = -1;
    sc->sc_aclwr_addr = -1;

    count = 0;
    (void)usbd_endpoint_count(sc->sc_iface0, &count);

    for (i = 0; i < count; i++) {
        int dir, type;

        ed = usbd_interface2endpoint_descriptor(sc->sc_iface0, i);
        if (ed == NULL) {
            kprintf("%s: could not read endpoint descriptor %d\n",
                device_get_nameunit(sc->sc_dev), i);
            return ENXIO;
        }

        dir = UE_GET_DIR(ed->bEndpointAddress);
        type = UE_GET_XFERTYPE(ed->bmAttributes);

        if (dir == UE_DIR_IN && type == UE_INTERRUPT)
            sc->sc_evt_addr = ed->bEndpointAddress;
        else if (dir == UE_DIR_IN && type == UE_BULK)
            sc->sc_aclrd_addr = ed->bEndpointAddress;
        else if (dir == UE_DIR_OUT && type == UE_BULK)
            sc->sc_aclwr_addr = ed->bEndpointAddress;
    }

    if (sc->sc_evt_addr == -1) {
        kprintf("%s: missing INTERRUPT endpoint on interface 0\n",
            device_get_nameunit(sc->sc_dev));
        return ENXIO;
    }
    if (sc->sc_aclrd_addr == -1) {
        kprintf("%s: missing BULK IN endpoint on interface 0\n",
            device_get_nameunit(sc->sc_dev));
        return ENXIO;
    }
    if (sc->sc_aclwr_addr == -1) {
        kprintf("%s: missing BULK OUT endpoint on interface 0\n",
            device_get_nameunit(sc->sc_dev));
        return ENXIO;
    }

    /*
     * Interface 1 must have 2 endpoints
     *    1) Isochronous IN endpoint to receive SCO data
     *    2) Isochronous OUT endpoint to send SCO data
     *
     * and will have several configurations, which can be selected
     * via a sysctl variable. We select config 0 to start, which
     * means that no SCO data will be available.
     */
    err = usbd_device2interface_handle(sc->sc_udev, 1, &sc->sc_iface1);
    if (err) {
        kprintf("%s: Could not get interface 1 handle %s (%d)\n",
            device_get_nameunit(sc->sc_dev), usbd_errstr(err), err);
        return ENXIO;
    }

    cd = usbd_get_config_descriptor(sc->sc_udev);
    if (cd == NULL) {
        kprintf("%s: could not get config descriptor\n",
            device_get_nameunit(sc->sc_dev));
        return ENXIO;
    }

    sc->sc_alt_config = usbd_get_no_alts(cd, 1);

    /* set initial config */
    err = ubt_set_isoc_config(sc);
    if (err) {
        kprintf("%s: ISOC config failed\n",
            device_get_nameunit(sc->sc_dev));
        return ENXIO;
    }

    /* Attach HCI */
    sc->sc_unit = hci_attach(&ubt_hci, sc->sc_dev, 0);

    usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, sc->sc_udev, sc->sc_dev);

    sc->sc_ok = 1;

    sysctl_ctx_init(&sc->sysctl_ctx);
    sc->sysctl_tree = SYSCTL_ADD_NODE(&sc->sysctl_ctx,
        SYSCTL_STATIC_CHILDREN(_hw), OID_AUTO,
        device_get_nameunit(sc->sc_dev), CTLFLAG_RD, 0, "");
    if (sc->sysctl_tree == NULL) {
        /* Failure isn't fatal */
        device_printf(sc->sc_dev, "Unable to create sysctl tree\n");
        return 0;
    }

    SYSCTL_ADD_PROC(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "config", CTLTYPE_INT|CTLFLAG_RW, (void *)sc, 0,
        ubt_sysctl_config, "I", "Configuration number");
    SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "alt_config", CTLFLAG_RD, &sc->sc_alt_config, 0,
        "Number of alternate configurations");
    SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "sco_rxsize", CTLFLAG_RD, &sc->sc_scord_size, 0,
        "Max SCO receive size");
    SYSCTL_ADD_INT(&sc->sysctl_ctx, SYSCTL_CHILDREN(sc->sysctl_tree),
        OID_AUTO, "sco_wrsize", CTLFLAG_RD, &sc->sc_scowr_size, 0,
        "Max SCO transmit size");

    return 0;
}
/*
 * Attach a Sierra Wireless USB modem: power the device up, read its
 * firmware attributes, scan the vendor-class interfaces to set up the
 * Direct IP data path and up to USIE_UCOM_MAX serial ports, attach the
 * ucom layer, and create the network interface.
 *
 * Returns 0 on success, ENOMEM after tearing down via usie_detach().
 */
static int
usie_attach(device_t self)
{
    struct usie_softc *sc = device_get_softc(self);
    struct usb_attach_arg *uaa = device_get_ivars(self);
    struct ifnet *ifp;
    struct usb_interface *iface;
    struct usb_interface_descriptor *id;
    struct usb_device_request req;
    int err;
    uint16_t fwattr;
    uint8_t iface_index;
    uint8_t ifidx;
    uint8_t start;

    device_set_usb_desc(self);
    sc->sc_udev = uaa->device;
    sc->sc_dev = self;

    mtx_init(&sc->sc_mtx, "usie", MTX_NETWORK_LOCK, MTX_DEF);
    ucom_ref(&sc->sc_super_ucom);

    TASK_INIT(&sc->sc_if_status_task, 0, usie_if_status_cb, sc);
    TASK_INIT(&sc->sc_if_sync_task, 0, usie_if_sync_cb, sc);

    usb_callout_init_mtx(&sc->sc_if_sync_ch, &sc->sc_mtx, 0);

    mtx_lock(&sc->sc_mtx);

    /* set power mode to D0 */
    req.bmRequestType = UT_WRITE_VENDOR_DEVICE;
    req.bRequest = USIE_POWER;
    USETW(req.wValue, 0);
    USETW(req.wIndex, 0);
    USETW(req.wLength, 0);
    if (usie_do_request(sc, &req, NULL)) {
        mtx_unlock(&sc->sc_mtx);
        goto detach;
    }
    /* read fw attr */
    fwattr = 0;
    req.bmRequestType = UT_READ_VENDOR_DEVICE;
    req.bRequest = USIE_FW_ATTR;
    USETW(req.wValue, 0);
    USETW(req.wIndex, 0);
    USETW(req.wLength, sizeof(fwattr));
    if (usie_do_request(sc, &req, &fwattr)) {
        mtx_unlock(&sc->sc_mtx);
        goto detach;
    }
    mtx_unlock(&sc->sc_mtx);

    /* check DHCP supports */
    DPRINTF("fwattr=%x\n", fwattr);
    if (!(fwattr & USIE_FW_DHCP)) {
        device_printf(self, "DHCP is not supported. A firmware upgrade might be needed.\n");
    }

    /* find available interfaces */
    sc->sc_nucom = 0;
    for (ifidx = 0; ifidx < USIE_IFACE_MAX; ifidx++) {
        iface = usbd_get_iface(uaa->device, ifidx);
        if (iface == NULL)
            break;

        id = usbd_get_interface_descriptor(iface);
        if ((id == NULL) || (id->bInterfaceClass != UICLASS_VENDOR))
            continue;

        /* setup Direct IP transfer */
        if (id->bInterfaceNumber >= 7 && id->bNumEndpoints == 3) {
            sc->sc_if_ifnum = id->bInterfaceNumber;
            iface_index = ifidx;

            DPRINTF("ifnum=%d, ifidx=%d\n",
                sc->sc_if_ifnum, ifidx);

            err = usbd_transfer_setup(uaa->device,
                &iface_index, sc->sc_if_xfer, usie_if_config,
                USIE_IF_N_XFER, sc, &sc->sc_mtx);

            if (err == 0)
                continue;

            device_printf(self,
                "could not allocate USB transfers on "
                "iface_index=%d, err=%s\n",
                iface_index, usbd_errstr(err));
            goto detach;
        }
        /* setup ucom */
        if (sc->sc_nucom >= USIE_UCOM_MAX)
            continue;

        usbd_set_parent_iface(uaa->device, ifidx,
            uaa->info.bIfaceIndex);

        DPRINTF("NumEndpoints=%d bInterfaceNumber=%d\n",
            id->bNumEndpoints, id->bInterfaceNumber);

        /* A 2-endpoint port has no interrupt pipe; skip xfer slot 0. */
        if (id->bNumEndpoints == 2) {
            sc->sc_uc_xfer[sc->sc_nucom][0] = NULL;
            start = 1;
        } else
            start = 0;

        err = usbd_transfer_setup(uaa->device, &ifidx,
            sc->sc_uc_xfer[sc->sc_nucom] + start,
            usie_uc_config + start, USIE_UC_N_XFER - start,
            &sc->sc_ucom[sc->sc_nucom], &sc->sc_mtx);

        if (err != 0) {
            DPRINTF("usbd_transfer_setup error=%s\n",
                usbd_errstr(err));
            continue;
        }

        mtx_lock(&sc->sc_mtx);
        for (; start < USIE_UC_N_XFER; start++)
            usbd_xfer_set_stall(sc->sc_uc_xfer[sc->sc_nucom][start]);
        mtx_unlock(&sc->sc_mtx);

        sc->sc_uc_ifnum[sc->sc_nucom] = id->bInterfaceNumber;

        sc->sc_nucom++;     /* found a port */
    }

    if (sc->sc_nucom == 0) {
        device_printf(self, "no comports found\n");
        goto detach;
    }

    err = ucom_attach(&sc->sc_super_ucom, sc->sc_ucom,
        sc->sc_nucom, sc, &usie_uc_callback, &sc->sc_mtx);

    if (err != 0) {
        DPRINTF("ucom_attach failed\n");
        goto detach;
    }
    DPRINTF("Found %d interfaces.\n", sc->sc_nucom);

    /* setup ifnet (Direct IP) */
    sc->sc_ifp = ifp = if_alloc(IFT_OTHER);

    if (ifp == NULL) {
        device_printf(self, "Could not allocate a network interface\n");
        goto detach;
    }
    if_initname(ifp, "usie", device_get_unit(self));

    ifp->if_softc = sc;
    ifp->if_mtu = USIE_MTU_MAX;
    ifp->if_flags |= IFF_NOARP;
    ifp->if_init = usie_if_init;
    ifp->if_ioctl = usie_if_ioctl;
    ifp->if_start = usie_if_start;
    ifp->if_output = usie_if_output;
    IFQ_SET_MAXLEN(&ifp->if_snd, ifqmaxlen);
    ifp->if_snd.ifq_drv_maxlen = ifqmaxlen;
    IFQ_SET_READY(&ifp->if_snd);

    if_attach(ifp);
    bpfattach(ifp, DLT_RAW, 0);

    /* Prefer USB autosuspend when the firmware advertises support. */
    if (fwattr & USIE_PM_AUTO) {
        usbd_set_power_mode(uaa->device, USB_POWER_MODE_SAVE);
        DPRINTF("enabling automatic suspend and resume\n");
    } else {
        usbd_set_power_mode(uaa->device, USB_POWER_MODE_ON);
        DPRINTF("USB power is always ON\n");
    }

    DPRINTF("device attached\n");
    return (0);

detach:
    usie_detach(self);
    return (ENOMEM);
}
static int pci_ioctl(struct dev_ioctl_args *ap) { device_t pcidev, brdev; void *confdata; const char *name; struct devlist *devlist_head; struct pci_conf_io *cio; struct pci_devinfo *dinfo; struct pci_io *io; struct pci_bar_io *bio; struct pci_match_conf *pattern_buf; struct resource_list_entry *rle; uint32_t value; size_t confsz, iolen, pbufsz; int error, ionum, i, num_patterns; #ifdef PRE7_COMPAT struct pci_conf_old conf_old; struct pci_io iodata; struct pci_io_old *io_old; struct pci_match_conf_old *pattern_buf_old; io_old = NULL; pattern_buf_old = NULL; if (!(ap->a_fflag & FWRITE) && ap->a_cmd != PCIOCGETBAR && ap->a_cmd != PCIOCGETCONF && ap->a_cmd != PCIOCGETCONF_OLD) return EPERM; #else if (!(ap->a_fflag & FWRITE) && ap->a_cmd != PCIOCGETBAR && ap->a_cmd != PCIOCGETCONF) return EPERM; #endif switch(ap->a_cmd) { #ifdef PRE7_COMPAT case PCIOCGETCONF_OLD: /* FALLTHROUGH */ #endif case PCIOCGETCONF: cio = (struct pci_conf_io *)ap->a_data; pattern_buf = NULL; num_patterns = 0; dinfo = NULL; cio->num_matches = 0; /* * If the user specified an offset into the device list, * but the list has changed since they last called this * ioctl, tell them that the list has changed. They will * have to get the list from the beginning. */ if ((cio->offset != 0) && (cio->generation != pci_generation)){ cio->status = PCI_GETCONF_LIST_CHANGED; error = 0; break; } /* * Check to see whether the user has asked for an offset * past the end of our list. */ if (cio->offset >= pci_numdevs) { cio->status = PCI_GETCONF_LAST_DEVICE; error = 0; break; } /* get the head of the device queue */ devlist_head = &pci_devq; /* * Determine how much room we have for pci_conf structures. * Round the user's buffer size down to the nearest * multiple of sizeof(struct pci_conf) in case the user * didn't specify a multiple of that size. 
*/ #ifdef PRE7_COMPAT if (ap->a_cmd == PCIOCGETCONF_OLD) confsz = sizeof(struct pci_conf_old); else #endif confsz = sizeof(struct pci_conf); iolen = min(cio->match_buf_len - (cio->match_buf_len % confsz), pci_numdevs * confsz); /* * Since we know that iolen is a multiple of the size of * the pciconf union, it's okay to do this. */ ionum = iolen / confsz; /* * If this test is true, the user wants the pci_conf * structures returned to match the supplied entries. */ if ((cio->num_patterns > 0) && (cio->num_patterns < pci_numdevs) && (cio->pat_buf_len > 0)) { /* * pat_buf_len needs to be: * num_patterns * sizeof(struct pci_match_conf) * While it is certainly possible the user just * allocated a large buffer, but set the number of * matches correctly, it is far more likely that * their kernel doesn't match the userland utility * they're using. It's also possible that the user * forgot to initialize some variables. Yes, this * may be overly picky, but I hazard to guess that * it's far more likely to just catch folks that * updated their kernel but not their userland. */ #ifdef PRE7_COMPAT if (ap->a_cmd == PCIOCGETCONF_OLD) pbufsz = sizeof(struct pci_match_conf_old); else #endif pbufsz = sizeof(struct pci_match_conf); if (cio->num_patterns * pbufsz != cio->pat_buf_len) { /* The user made a mistake, return an error. */ cio->status = PCI_GETCONF_ERROR; error = EINVAL; break; } /* * Allocate a buffer to hold the patterns. */ #ifdef PRE7_COMPAT if (ap->a_cmd == PCIOCGETCONF_OLD) { pattern_buf_old = kmalloc(cio->pat_buf_len, M_TEMP, M_WAITOK); error = copyin(cio->patterns, pattern_buf_old, cio->pat_buf_len); } else #endif { pattern_buf = kmalloc(cio->pat_buf_len, M_TEMP, M_WAITOK); error = copyin(cio->patterns, pattern_buf, cio->pat_buf_len); } if (error != 0) { error = EINVAL; goto getconfexit; } num_patterns = cio->num_patterns; } else if ((cio->num_patterns > 0) || (cio->pat_buf_len > 0)) { /* * The user made a mistake, spit out an error. 
*/ cio->status = PCI_GETCONF_ERROR; error = EINVAL; break; } /* * Go through the list of devices and copy out the devices * that match the user's criteria. */ for (cio->num_matches = 0, error = 0, i = 0, dinfo = STAILQ_FIRST(devlist_head); (dinfo != NULL) && (cio->num_matches < ionum) && (error == 0) && (i < pci_numdevs) && (dinfo != NULL); dinfo = STAILQ_NEXT(dinfo, pci_links), i++) { if (i < cio->offset) continue; /* Populate pd_name and pd_unit */ name = NULL; if (dinfo->cfg.dev) name = device_get_name(dinfo->cfg.dev); if (name) { strncpy(dinfo->conf.pd_name, name, sizeof(dinfo->conf.pd_name)); dinfo->conf.pd_name[PCI_MAXNAMELEN] = 0; dinfo->conf.pd_unit = device_get_unit(dinfo->cfg.dev); } else { dinfo->conf.pd_name[0] = '\0'; dinfo->conf.pd_unit = 0; } #ifdef PRE7_COMPAT if ((ap->a_cmd == PCIOCGETCONF_OLD && (pattern_buf_old == NULL || pci_conf_match_old(pattern_buf_old, num_patterns, &dinfo->conf) == 0)) || (ap->a_cmd == PCIOCGETCONF && (pattern_buf == NULL || pci_conf_match(pattern_buf, num_patterns, &dinfo->conf) == 0))) { #else if (pattern_buf == NULL || pci_conf_match(pattern_buf, num_patterns, &dinfo->conf) == 0) { #endif /* * If we've filled up the user's buffer, * break out at this point. Since we've * got a match here, we'll pick right back * up at the matching entry. We can also * tell the user that there are more matches * left. 
*/ if (cio->num_matches >= ionum) break; #ifdef PRE7_COMPAT if (ap->a_cmd == PCIOCGETCONF_OLD) { conf_old.pc_sel.pc_bus = dinfo->conf.pc_sel.pc_bus; conf_old.pc_sel.pc_dev = dinfo->conf.pc_sel.pc_dev; conf_old.pc_sel.pc_func = dinfo->conf.pc_sel.pc_func; conf_old.pc_hdr = dinfo->conf.pc_hdr; conf_old.pc_subvendor = dinfo->conf.pc_subvendor; conf_old.pc_subdevice = dinfo->conf.pc_subdevice; conf_old.pc_vendor = dinfo->conf.pc_vendor; conf_old.pc_device = dinfo->conf.pc_device; conf_old.pc_class = dinfo->conf.pc_class; conf_old.pc_subclass = dinfo->conf.pc_subclass; conf_old.pc_progif = dinfo->conf.pc_progif; conf_old.pc_revid = dinfo->conf.pc_revid; strncpy(conf_old.pd_name, dinfo->conf.pd_name, sizeof(conf_old.pd_name)); conf_old.pd_name[PCI_MAXNAMELEN] = 0; conf_old.pd_unit = dinfo->conf.pd_unit; confdata = &conf_old; } else #endif confdata = &dinfo->conf; /* Only if we can copy it out do we count it. */ if (!(error = copyout(confdata, (caddr_t)cio->matches + confsz * cio->num_matches, confsz))) cio->num_matches++; } } /* * Set the pointer into the list, so if the user is getting * n records at a time, where n < pci_numdevs, */ cio->offset = i; /* * Set the generation, the user will need this if they make * another ioctl call with offset != 0. */ cio->generation = pci_generation; /* * If this is the last device, inform the user so he won't * bother asking for more devices. If dinfo isn't NULL, we * know that there are more matches in the list because of * the way the traversal is done. 
*/ if (dinfo == NULL) cio->status = PCI_GETCONF_LAST_DEVICE; else cio->status = PCI_GETCONF_MORE_DEVS; getconfexit: if (pattern_buf != NULL) kfree(pattern_buf, M_TEMP); #ifdef PRE7_COMPAT if (pattern_buf_old != NULL) kfree(pattern_buf_old, M_TEMP); #endif break; #ifdef PRE7_COMPAT case PCIOCREAD_OLD: case PCIOCWRITE_OLD: io_old = (struct pci_io_old *)ap->a_data; iodata.pi_sel.pc_domain = 0; iodata.pi_sel.pc_bus = io_old->pi_sel.pc_bus; iodata.pi_sel.pc_dev = io_old->pi_sel.pc_dev; iodata.pi_sel.pc_func = io_old->pi_sel.pc_func; iodata.pi_reg = io_old->pi_reg; iodata.pi_width = io_old->pi_width; iodata.pi_data = io_old->pi_data; ap->a_data = (caddr_t)&iodata; /* FALLTHROUGH */ #endif case PCIOCREAD: case PCIOCWRITE: io = (struct pci_io *)ap->a_data; switch(io->pi_width) { case 4: case 2: case 1: /* Make sure register is in bounds and aligned. */ if (io->pi_reg < 0 || io->pi_reg + io->pi_width > PCI_REGMAX + 1 || io->pi_reg & (io->pi_width - 1)) { error = EINVAL; break; } /* * Assume that the user-level bus number is * in fact the physical PCI bus number. * Look up the grandparent, i.e. the bridge device, * so that we can issue configuration space cycles. 
*/ pcidev = pci_find_dbsf(io->pi_sel.pc_domain, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func); if (pcidev) { brdev = device_get_parent( device_get_parent(pcidev)); #ifdef PRE7_COMPAT if (ap->a_cmd == PCIOCWRITE || ap->a_cmd == PCIOCWRITE_OLD) #else if (ap->a_cmd == PCIOCWRITE) #endif PCIB_WRITE_CONFIG(brdev, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func, io->pi_reg, io->pi_data, io->pi_width); #ifdef PRE7_COMPAT else if (ap->a_cmd == PCIOCREAD_OLD) io_old->pi_data = PCIB_READ_CONFIG(brdev, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func, io->pi_reg, io->pi_width); #endif else io->pi_data = PCIB_READ_CONFIG(brdev, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func, io->pi_reg, io->pi_width); error = 0; } else { #ifdef COMPAT_FREEBSD4 if (cmd == PCIOCREAD_OLD) { io_old->pi_data = -1; error = 0; } else #endif error = ENODEV; } break; default: error = EINVAL; break; } break; case PCIOCGETBAR: bio = (struct pci_bar_io *)ap->a_data; /* * Assume that the user-level bus number is * in fact the physical PCI bus number. */ pcidev = pci_find_dbsf(bio->pbi_sel.pc_domain, bio->pbi_sel.pc_bus, bio->pbi_sel.pc_dev, bio->pbi_sel.pc_func); if (pcidev == NULL) { error = ENODEV; break; } dinfo = device_get_ivars(pcidev); /* * Look for a resource list entry matching the requested BAR. * * XXX: This will not find BARs that are not initialized, but * maybe that is ok? */ rle = resource_list_find(&dinfo->resources, SYS_RES_MEMORY, bio->pbi_reg); if (rle == NULL) rle = resource_list_find(&dinfo->resources, SYS_RES_IOPORT, bio->pbi_reg); if (rle == NULL || rle->res == NULL) { error = EINVAL; break; } /* * Ok, we have a resource for this BAR. Read the lower * 32 bits to get any flags. 
*/ value = pci_read_config(pcidev, bio->pbi_reg, 4); if (PCI_BAR_MEM(value)) { if (rle->type != SYS_RES_MEMORY) { error = EINVAL; break; } value &= ~PCIM_BAR_MEM_BASE; } else { if (rle->type != SYS_RES_IOPORT) { error = EINVAL; break; } value &= ~PCIM_BAR_IO_BASE; } bio->pbi_base = rman_get_start(rle->res) | value; bio->pbi_length = rman_get_size(rle->res); /* * Check the command register to determine if this BAR * is enabled. */ value = pci_read_config(pcidev, PCIR_COMMAND, 2); if (rle->type == SYS_RES_MEMORY) bio->pbi_enabled = (value & PCIM_CMD_MEMEN) != 0; else bio->pbi_enabled = (value & PCIM_CMD_PORTEN) != 0; error = 0; break; case PCIOCATTACHED: error = 0; io = (struct pci_io *)ap->a_data; pcidev = pci_find_dbsf(io->pi_sel.pc_domain, io->pi_sel.pc_bus, io->pi_sel.pc_dev, io->pi_sel.pc_func); if (pcidev != NULL) io->pi_data = device_is_attached(pcidev); else error = ENODEV; break; default: error = ENOTTY; break; } return (error); }
/*
 * Attach a Mylex RAID system drive as a disk device: record state from
 * the controller's ivars, report the drive's size/RAID level/state, and
 * create the disk with geometry and I/O limits taken from the
 * controller's enquiry data.
 *
 * Returns 0 (attach never fails once probe has matched).
 */
static int
mlxd_attach(device_t dev)
{
    struct mlxd_softc *sc = (struct mlxd_softc *)device_get_softc(dev);
    device_t parent;
    char *state;
    int s1, s2;

    debug_called(1);

    parent = device_get_parent(dev);
    sc->mlxd_controller = (struct mlx_softc *)device_get_softc(parent);
    sc->mlxd_unit = device_get_unit(dev);
    sc->mlxd_drive = device_get_ivars(dev);
    sc->mlxd_dev = dev;

    /* Human-readable drive state for the attach message. */
    switch (sc->mlxd_drive->ms_state) {
    case MLX_SYSD_ONLINE:
        state = "online";
        break;
    case MLX_SYSD_CRITICAL:
        state = "critical";
        break;
    case MLX_SYSD_OFFLINE:
        state = "offline";
        break;
    default:
        state = "unknown state";
    }

    device_printf(dev, "%uMB (%u sectors) RAID %d (%s)\n",
        sc->mlxd_drive->ms_size / ((1024 * 1024) / MLX_BLKSIZE),
        sc->mlxd_drive->ms_size, sc->mlxd_drive->ms_raidlevel, state);

    sc->mlxd_disk = disk_alloc();
    sc->mlxd_disk->d_open = mlxd_open;
    sc->mlxd_disk->d_close = mlxd_close;
    sc->mlxd_disk->d_ioctl = mlxd_ioctl;
    sc->mlxd_disk->d_strategy = mlxd_strategy;
    sc->mlxd_disk->d_name = "mlxd";
    sc->mlxd_disk->d_unit = sc->mlxd_unit;
    sc->mlxd_disk->d_drv1 = sc;
    sc->mlxd_disk->d_sectorsize = MLX_BLKSIZE;
    sc->mlxd_disk->d_mediasize = MLX_BLKSIZE * (off_t)sc->mlxd_drive->ms_size;
    sc->mlxd_disk->d_fwsectors = sc->mlxd_drive->ms_sectors;
    sc->mlxd_disk->d_fwheads = sc->mlxd_drive->ms_heads;
    sc->mlxd_disk->d_flags = DISKFLAG_NEEDSGIANT;

    /*
     * Set maximum I/O size to the lesser of the recommended maximum and the practical
     * maximum except on v2 cards where the maximum is set to 8 pages.
     */
    if (sc->mlxd_controller->mlx_iftype == MLX_IFTYPE_2)
        sc->mlxd_disk->d_maxsize = 8 * MLX_PAGE_SIZE;
    else {
        s1 = sc->mlxd_controller->mlx_enq2->me_maxblk * MLX_BLKSIZE;
        s2 = (sc->mlxd_controller->mlx_enq2->me_max_sg - 1) * MLX_PAGE_SIZE;
        sc->mlxd_disk->d_maxsize = imin(s1, s2);
    }

    disk_create(sc->mlxd_disk, DISK_VERSION);

    return (0);
}
static struct resource * pxa_smi_alloc_resource(device_t dev, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct pxa_smi_softc *sc; struct smi_ivars *smid; struct resource *rv; struct resource_list *rl; struct resource_list_entry *rle; int needactivate; sc = (struct pxa_smi_softc *)device_get_softc(dev); smid = (struct smi_ivars *)device_get_ivars(child); rl = &smid->smid_resources; if (type == SYS_RES_IOPORT) type = SYS_RES_MEMORY; rle = resource_list_find(rl, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) panic("pxa_smi_alloc_resource: resource is busy"); needactivate = flags & RF_ACTIVE; flags &= ~RF_ACTIVE; switch (type) { case SYS_RES_MEMORY: rv = rman_reserve_resource(&sc->ps_mem, rle->start, rle->end, rle->count, flags, child); if (rv == NULL) return (NULL); rle->res = rv; rman_set_rid(rv, *rid); rman_set_bustag(rv, sc->ps_bst); rman_set_bushandle(rv, rle->start); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { rman_release_resource(rv); return (NULL); } } break; case SYS_RES_IRQ: rv = bus_alloc_resource(dev, type, rid, rle->start, rle->end, rle->count, flags); if (rv == NULL) return (NULL); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv) != 0) { bus_release_resource(dev, type, *rid, rv); return (NULL); } } break; default: return (NULL); } return (rv); }
/*
 * ppb_MS_microseq()
 *
 * Interpret a microsequence. Some microinstructions are executed at adapter
 * level to avoid function call overhead between ppbus and the adapter.
 *
 * bus/dev must hold bus ownership (checked below); *ret receives the value
 * of the terminating MS_OP_RET instruction.  Returns 0 on normal
 * termination or an errno from a failed PUT/GET/adapter-level instruction.
 */
int
ppb_MS_microseq(device_t bus, device_t dev, struct ppb_microseq *msq, int *ret)
{
	struct ppb_data *ppb = (struct ppb_data *)device_get_softc(bus);
	struct ppb_device *ppbdev = (struct ppb_device *)device_get_ivars(dev);
	struct ppb_microseq *mi;	/* current microinstruction (the "PC") */
	int error;

	struct ppb_xfer *xfer;

	/* microsequence executed to initialize the transfer */
	struct ppb_microseq initxfer[] = {
		MS_PTR(MS_UNKNOWN),	/* set ptr to buffer */
		MS_SET(MS_UNKNOWN),	/* set transfer size */
		MS_RET(0)
	};

	/* The ppc lock must already be held by the caller. */
	mtx_assert(ppb->ppc_lock, MA_OWNED);
	if (ppb->ppb_owner != dev)
		return (EACCES);

/* advance the microinstruction pointer by one */
#define INCR_PC (mi ++)

	mi = msq;
	for (;;) {
		switch (mi->opcode) {
		case MS_OP_PUT:
		case MS_OP_GET:

			/* attempt to choose the best mode for the device */
			xfer = mode2xfer(bus, ppbdev, mi->opcode);

			/* figure out if we should use ieee1284 code */
			if (!xfer->loop) {
				if (mi->opcode == MS_OP_PUT) {
					if ((error = PPBUS_WRITE(
						device_get_parent(bus),
						(char *)mi->arg[0].p,
						mi->arg[1].i, 0)))
						goto error;

					INCR_PC;
					goto next;
				} else
					panic("%s: IEEE1284 read not supported", __func__);
			}

			/* XXX should use ppb_MS_init_msq() */
			/* patch buffer pointer and length into initxfer */
			initxfer[0].arg[0].p = mi->arg[0].p;
			initxfer[1].arg[0].i = mi->arg[1].i;

			/* initialize transfer (recursive interpretation) */
			ppb_MS_microseq(bus, dev, initxfer, &error);

			if (error)
				goto error;

			/* the xfer microsequence should not contain any
			 * MS_OP_PUT or MS_OP_GET!
			 */
			ppb_MS_microseq(bus, dev, xfer->loop, &error);

			if (error)
				goto error;

			INCR_PC;
			break;

		case MS_OP_RET:
			if (ret)
				*ret = mi->arg[0].i;	/* return code */
			return (0);

		default:
			/* executing microinstructions at ppc level is
			 * faster. This is the default if the microinstr
			 * is unknown here.  Note that &mi is passed so the
			 * adapter advances our PC past what it consumed.
			 */
			if ((error = PPBUS_EXEC_MICROSEQ(
				device_get_parent(bus), &mi)))
				goto error;
			break;
		}
	next:
		continue;
	}
error:
	return (error);
}
/*
 * Attach an NDIS USB device: match it against the Windows driver
 * database, create a PDO for the device instance, select the USB
 * configuration, record which device-list entry matched, and hand off
 * to the common ndis_attach().  Runs entirely under the wlan serializer.
 */
static int
ndisusb_attach(device_t self)
{
	struct usb_attach_arg *uaa = device_get_ivars(self);
	struct ndis_softc *sc;
	struct drvdb_ent *db;
	struct ndis_usb_type *devs;
	driver_object *drv;
	usbd_status status;
	int idx;
	int error = 0;

	sc = (struct ndis_softc *)device_get_softc(self);

	wlan_serialize_enter();

	if (uaa->device == NULL) {
		error = ENXIO;
		goto done;
	}

	db = windrv_match((matchfuncptr)ndisusb_devcompare, self);
	if (db == NULL) {
		error = ENXIO;
		goto done;
	}

	sc->ndis_dev = self;
	sc->ndis_dobj = db->windrv_object;
	sc->ndis_regvals = db->windrv_regvals;
	sc->ndis_iftype = PNPBus;

	/* Create PDO for this device instance */
	drv = windrv_lookup(0, "USB Bus");
	windrv_create_pdo(drv, self);

	status = usbd_set_config_no(uaa->device, NDISUSB_CONFIG_NO, 0);
	if (status != USBD_NORMAL_COMPLETION) {
		device_printf(self, "setting config no failed\n");
		error = ENXIO;
		goto done;
	}

	/* Figure out exactly which device we matched. */
	devs = db->windrv_devlist;
	for (idx = 0; devs[idx].ndis_name != NULL; idx++) {
		if ((uaa->vendor == devs[idx].ndis_vid) &&
		    (uaa->product == devs[idx].ndis_did)) {
			sc->ndis_devidx = idx;
			break;
		}
	}

	if (ndis_attach(self) != 0) {
		error = ENXIO;
		goto done;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_ATTACH, uaa->device, self);

done:
	wlan_serialize_exit();
	return (error);
}
/*
 * Attach an Ethernet-emulation interface on top of a FireWire port.
 * Synthesizes a locally-administered MAC address from the port's EUI-64,
 * allocates and configures an ifnet, and attaches it to the network stack.
 * Returns 0 on success or ENOSPC if if_alloc() fails.
 */
static int
fwe_attach(device_t dev)
{
	struct fwe_softc *fwe;
	struct ifnet *ifp;
	int unit, s;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	/* legacy stacks store the lladdr inside the ifnet */
	u_char *eaddr;
#else
	u_char eaddr[6];
#endif
	struct fw_eui64 *eui;

	fwe = ((struct fwe_softc *)device_get_softc(dev));
	unit = device_get_unit(dev);

	/* softc starts from a clean slate */
	bzero(fwe, sizeof(struct fwe_softc));
	mtx_init(&fwe->mtx, "fwe", NULL, MTX_DEF);
	/* XXX */
	fwe->stream_ch = stream_ch;
	fwe->dma_ch = -1;

	fwe->fd.fc = device_get_ivars(dev);
	if (tx_speed < 0)
		tx_speed = fwe->fd.fc->speed;

	fwe->fd.dev = dev;
	fwe->fd.post_explore = NULL;
	fwe->eth_softc.fwe = fwe;

	/* fixed isochronous-stream packet header template */
	fwe->pkt_hdr.mode.stream.tcode = FWTCODE_STREAM;
	fwe->pkt_hdr.mode.stream.sy = 0;
	fwe->pkt_hdr.mode.stream.chtag = fwe->stream_ch;

	/* generate fake MAC address: first and last 3bytes from eui64 */
#define LOCAL (0x02)
#define GROUP (0x01)

#if defined(__DragonFly__) || __FreeBSD_version < 500000
	/*
	 * NOTE(review): eth_softc.ifp is dereferenced here but if_alloc()
	 * runs only further below — on this legacy path ifp looks
	 * uninitialized/NULL at this point.  TODO confirm against the
	 * legacy ifnet layout before relying on this path.
	 */
	eaddr = &IFP2ENADDR(fwe->eth_softc.ifp)[0];
#endif
	eui = &fwe->fd.fc->eui;
	/* set locally-administered, clear multicast/group bit */
	eaddr[0] = (FW_EUI64_BYTE(eui, 0) | LOCAL) & ~GROUP;
	eaddr[1] = FW_EUI64_BYTE(eui, 1);
	eaddr[2] = FW_EUI64_BYTE(eui, 2);
	eaddr[3] = FW_EUI64_BYTE(eui, 5);
	eaddr[4] = FW_EUI64_BYTE(eui, 6);
	eaddr[5] = FW_EUI64_BYTE(eui, 7);
	printf("if_fwe%d: Fake Ethernet address: "
	    "%02x:%02x:%02x:%02x:%02x:%02x\n", unit,
	    eaddr[0], eaddr[1], eaddr[2], eaddr[3], eaddr[4], eaddr[5]);

	/* fill the rest and attach interface */
	ifp = fwe->eth_softc.ifp = if_alloc(IFT_ETHER);
	if (ifp == NULL) {
		device_printf(dev, "can not if_alloc()\n");
		return (ENOSPC);
	}
	ifp->if_softc = &fwe->eth_softc;

#if __FreeBSD_version >= 501113 || defined(__DragonFly__)
	if_initname(ifp, device_get_name(dev), unit);
#else
	ifp->if_unit = unit;
	ifp->if_name = "fwe";
#endif
	ifp->if_init = fwe_init;
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	ifp->if_output = ether_output;
#endif
	ifp->if_start = fwe_start;
	ifp->if_ioctl = fwe_ioctl;
	ifp->if_mtu = ETHERMTU;
	ifp->if_flags = (IFF_BROADCAST|IFF_SIMPLEX|IFF_MULTICAST);
	ifp->if_snd.ifq_maxlen = TX_MAX_QUEUE;

	/* block interrupts while hooking into the network stack */
	s = splimp();
#if defined(__DragonFly__) || __FreeBSD_version < 500000
	ether_ifattach(ifp, 1);
#else
	ether_ifattach(ifp, eaddr);
#endif
	splx(s);

	/* Tell the upper layer(s) we support long frames. */
	ifp->if_data.ifi_hdrlen = sizeof(struct ether_vlan_header);
#if defined(__FreeBSD__) && __FreeBSD_version >= 500000
	ifp->if_capabilities |= IFCAP_VLAN_MTU | IFCAP_POLLING;
	ifp->if_capenable |= IFCAP_VLAN_MTU;
#endif

	FWEDEBUG(ifp, "interface created\n");
	return 0;
}
/*
 * Attach an ATA disk: allocate and install the per-disk softc as the
 * device ivars, read the geometry, publish a disk(9) device with the
 * appropriate transfer limits and capability flags, and create the
 * "subdisk" child.  Returns 0 on success, EEXIST if already attached,
 * ENOMEM on allocation failure, or ENXIO if the geometry cannot be read.
 */
static int
ad_attach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
	struct ata_device *atadev = device_get_softc(dev);
	struct ad_softc *adp;
	device_t parent;

	/* check that we have a virgin disk to attach */
	if (device_get_ivars(dev))
		return EEXIST;

	if (!(adp = malloc(sizeof(struct ad_softc), M_AD, M_NOWAIT | M_ZERO))) {
		device_printf(dev, "out of memory\n");
		return ENOMEM;
	}
	device_set_ivars(dev, adp);

	/* get device geometry into internal structs */
	if (ad_get_geometry(dev)) {
		/*
		 * Undo the allocation: a failed attach never sees detach,
		 * so leaving adp in the ivars would both leak the softc and
		 * make a later re-attach attempt fail with EEXIST.
		 */
		device_set_ivars(dev, NULL);
		free(adp, M_AD);
		return ENXIO;
	}

	/* set the max size if configured */
	if (ata_setmax)
		ad_set_geometry(dev);

	/* init device parameters */
	ad_init(dev);
	/* announce we are here */
	ad_describe(dev);

	/* create the disk device */
	adp->disk = disk_alloc();
	adp->disk->d_strategy = ad_strategy;
	adp->disk->d_ioctl = ad_ioctl;
	adp->disk->d_dump = ad_dump;
	adp->disk->d_name = "ad";
	adp->disk->d_drv1 = dev;
	/* transfer ceiling: DMA engine limit, capped by the command set */
	adp->disk->d_maxsize = ch->dma.max_iosize ? ch->dma.max_iosize : DFLTPHYS;
	if (atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48)
		adp->disk->d_maxsize = min(adp->disk->d_maxsize,
		    65536 * DEV_BSIZE);
	else /* 28bit ATA command limit */
		adp->disk->d_maxsize = min(adp->disk->d_maxsize,
		    256 * DEV_BSIZE);
	adp->disk->d_sectorsize = DEV_BSIZE;
	adp->disk->d_mediasize = DEV_BSIZE * (off_t)adp->total_secs;
	adp->disk->d_fwsectors = adp->sectors;
	adp->disk->d_fwheads = adp->heads;
	adp->disk->d_unit = device_get_unit(dev);
	if (atadev->param.support.command2 & ATA_SUPPORT_FLUSHCACHE)
		adp->disk->d_flags |= DISKFLAG_CANFLUSHCACHE;
	if ((atadev->param.support.command2 & ATA_SUPPORT_CFA) ||
	    atadev->param.config == ATA_PROTO_CFA)
		adp->disk->d_flags |= DISKFLAG_CANDELETE;
	strlcpy(adp->disk->d_ident, atadev->param.serial,
	    sizeof(adp->disk->d_ident));
	strlcpy(adp->disk->d_descr, atadev->param.model,
	    sizeof(adp->disk->d_descr));
	/* record the HBA's PCI identity when the grandparent is a PCI bus */
	parent = device_get_parent(ch->dev);
	if (parent != NULL && device_get_parent(parent) != NULL &&
	    (device_get_devclass(parent) == devclass_find("atapci") ||
	     device_get_devclass(device_get_parent(parent)) ==
	     devclass_find("pci"))) {
		adp->disk->d_hba_vendor = pci_get_vendor(parent);
		adp->disk->d_hba_device = pci_get_device(parent);
		adp->disk->d_hba_subvendor = pci_get_subvendor(parent);
		adp->disk->d_hba_subdevice = pci_get_subdevice(parent);
	}
	ata_disk_firmware_geom_adjust(adp->disk);
	disk_create(adp->disk, DISK_VERSION);

	device_add_child(dev, "subdisk", device_get_unit(dev));
	bus_generic_attach(dev);

	callout_init(&atadev->spindown_timer, 1);
	return 0;
}
static struct resource * obio_alloc_resource(device_t bus, device_t child, int type, int *rid, u_long start, u_long end, u_long count, u_int flags) { struct obio_softc *sc = device_get_softc(bus); struct obio_ivar *ivar = device_get_ivars(child); struct resource *rv; struct resource_list_entry *rle; struct rman *rm; int isdefault, needactivate, passthrough; isdefault = (start == 0UL && end == ~0UL); needactivate = flags & RF_ACTIVE; passthrough = (device_get_parent(child) != bus); rle = NULL; if (passthrough) return (BUS_ALLOC_RESOURCE(device_get_parent(bus), child, type, rid, start, end, count, flags)); /* * If this is an allocation of the "default" range for a given RID, * and we know what the resources for this device are (ie. they aren't * maintained by a child bus), then work out the start/end values. */ if (isdefault) { rle = resource_list_find(&ivar->resources, type, *rid); if (rle == NULL) return (NULL); if (rle->res != NULL) { panic("%s: resource entry is busy", __func__); } start = rle->start; end = rle->end; count = rle->count; } switch (type) { case SYS_RES_IRQ: rm = &sc->oba_irq_rman; break; case SYS_RES_MEMORY: rm = &sc->oba_mem_rman; break; default: printf("%s: unknown resource type %d\n", __func__, type); return (0); } rv = rman_reserve_resource(rm, start, end, count, flags, child); if (rv == 0) { printf("%s: could not reserve resource\n", __func__); return (0); } rman_set_rid(rv, *rid); if (needactivate) { if (bus_activate_resource(child, type, *rid, rv)) { printf("%s: could not activate resource\n", __func__); rman_release_resource(rv); return (0); } } return (rv); }
/*
 * Strategy routine for an ATAPI floppy/removable device: translate the
 * incoming bio into an ATAPI READ(10)/WRITE(10) CCB, build an ata_request,
 * and queue it.  Errors (media change, unknown command, allocation
 * failure) are reported by completing the bio with B_ERROR set.
 * Always returns 0; real completion status is delivered via afd_done().
 */
static int
afd_strategy(struct dev_strategy_args *ap)
{
	device_t dev = ap->a_head.a_dev->si_drv1;
	struct bio *bp = ap->a_bio;
	struct buf *bbp = bp->bio_buf;
	struct ata_device *atadev = device_get_softc(dev);
	struct afd_softc *fdp = device_get_ivars(dev);
	struct ata_request *request;
	u_int32_t lba;
	u_int16_t count;
	int8_t ccb[16];

	/* if it's a null transfer, return immediately. */
	if (bbp->b_bcount == 0) {
		bbp->b_resid = 0;
		biodone(bp);
		return 0;
	}

	/* should reject all queued entries if media have changed. */
	if (atadev->flags & ATA_D_MEDIA_CHANGED) {
		bbp->b_flags |= B_ERROR;
		bbp->b_error = EIO;
		biodone(bp);
		return 0;
	}

	/* convert the byte-addressed bio into sector LBA and count */
	lba = bp->bio_offset / fdp->sectorsize;
	count = bbp->b_bcount / fdp->sectorsize;
	bbp->b_resid = bbp->b_bcount;

	/* build the ATAPI CCB for the requested direction */
	bzero(ccb, sizeof(ccb));

	switch(bbp->b_cmd) {
	case BUF_CMD_READ:
		ccb[0] = ATAPI_READ_BIG;
		break;
	case BUF_CMD_WRITE:
		ccb[0] = ATAPI_WRITE_BIG;
		break;
	default:
		device_printf(dev, "unknown BUF operation\n");
		bbp->b_flags |= B_ERROR;
		bbp->b_error = EIO;
		biodone(bp);
		return 0;
	}

	/* big-endian LBA (bytes 2-5) and transfer length (bytes 7-8) */
	ccb[2] = lba >> 24;
	ccb[3] = lba >> 16;
	ccb[4] = lba >> 8;
	ccb[5] = lba;
	ccb[7] = count>>8;
	ccb[8] = count;

	if (!(request = ata_alloc_request())) {
		bbp->b_flags |= B_ERROR;
		bbp->b_error = ENOMEM;
		biodone(bp);
		return 0;
	}
	request->dev = dev;
	request->bio = bp;
	/* CCB length depends on whether the device speaks 12- or 16-byte packets */
	bcopy(ccb, request->u.atapi.ccb,
	    (atadev->param.config & ATA_PROTO_MASK) ==
	    ATA_PROTO_ATAPI_12 ? 16 : 12);
	request->data = bbp->b_data;
	request->bytecount = count * fdp->sectorsize;
	request->transfersize = min(request->bytecount, 65534);
	/* writes get a longer timeout */
	request->timeout = (ccb[0] == ATAPI_WRITE_BIG) ? 60 : 30;
	request->retries = 2;
	request->callback = afd_done;

	switch (bbp->b_cmd) {
	case BUF_CMD_READ:
		request->flags = (ATA_R_ATAPI | ATA_R_READ);
		break;
	case BUF_CMD_WRITE:
		request->flags = (ATA_R_ATAPI | ATA_R_WRITE);
		break;
	default:
		/* already filtered above; reaching here is a logic error */
		panic("bbp->b_cmd");
	}
	if (atadev->mode >= ATA_DMA)
		request->flags |= ATA_R_DMA;
	request->flags |= ATA_R_ORDERED;
	devstat_start_transaction(&fdp->stats);
	ata_queue_request(request);
	return 0;
}
/*
 * Attach an ATA disk (DragonFly variant): allocate the per-disk softc,
 * work out the drive's capacity from CHS / LBA28 / LBA48 identify data,
 * register devstat, create the disk device, and fill in the disk_info.
 * Returns 0 on success or EEXIST if ivars are already set.
 */
static int
ad_attach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(device_get_parent(dev));
	struct ata_device *atadev = device_get_softc(dev);
	struct disk_info info;
	struct ad_softc *adp;
	cdev_t cdev;
	u_int32_t lbasize;
	u_int64_t lbasize48;

	/* check that we have a virgin disk to attach */
	if (device_get_ivars(dev))
		return EEXIST;

	adp = kmalloc(sizeof(struct ad_softc), M_AD, M_INTWAIT | M_ZERO);
	device_set_ivars(dev, adp);

	/* prefer the "current" CHS values when the identify data marks them valid */
	if ((atadev->param.atavalid & ATA_FLAG_54_58) &&
	    atadev->param.current_heads && atadev->param.current_sectors) {
		adp->heads = atadev->param.current_heads;
		adp->sectors = atadev->param.current_sectors;
		adp->total_secs = (u_int32_t)atadev->param.current_size_1 |
		    ((u_int32_t)atadev->param.current_size_2 << 16);
	} else {
		adp->heads = atadev->param.heads;
		adp->sectors = atadev->param.sectors;
		adp->total_secs = atadev->param.cylinders * adp->heads *
		    adp->sectors;
	}
	/* 28-bit LBA capacity from the two identify words */
	lbasize = (u_int32_t)atadev->param.lba_size_1 |
	    ((u_int32_t)atadev->param.lba_size_2 << 16);

	/* does this device need oldstyle CHS addressing */
	if (!ad_version(atadev->param.version_major) || !lbasize)
		atadev->flags |= ATA_D_USE_CHS;

	/* use the 28bit LBA size if valid or bigger than the CHS mapping */
	if (atadev->param.cylinders == 16383 || adp->total_secs < lbasize)
		adp->total_secs = lbasize;

	/* use the 48bit LBA size if valid */
	lbasize48 = ((u_int64_t)atadev->param.lba_size48_1) |
	    ((u_int64_t)atadev->param.lba_size48_2 << 16) |
	    ((u_int64_t)atadev->param.lba_size48_3 << 32) |
	    ((u_int64_t)atadev->param.lba_size48_4 << 48);
	if ((atadev->param.support.command2 & ATA_SUPPORT_ADDRESS48) &&
	    lbasize48 > ATA_MAX_28BIT_LBA)
		adp->total_secs = lbasize48;

	/* init device parameters */
	ad_init(dev);

	/* create the disk device */
	/* XXX TGEN Maybe use DEVSTAT_ALL_SUPPORTED, DEVSTAT_TYPE_DIRECT,
	   DEVSTAT_PRIORITY_MAX. */
	devstat_add_entry(&adp->stats, "ad", device_get_unit(dev), DEV_BSIZE,
			  DEVSTAT_NO_ORDERED_TAGS,
			  DEVSTAT_TYPE_DIRECT | DEVSTAT_TYPE_IF_IDE,
			  DEVSTAT_PRIORITY_DISK);
	cdev = disk_create(device_get_unit(dev), &adp->disk, &ad_ops);
	cdev->si_drv1 = dev;
	/* cap I/O size at the channel's DMA limit when DMA is available */
	if (ch->dma)
		cdev->si_iosize_max = ch->dma->max_iosize;
	else
		cdev->si_iosize_max = DFLTPHYS;
	adp->cdev = cdev;

	/* describe the media to the disk layer */
	bzero(&info, sizeof(info));
	info.d_media_blksize = DEV_BSIZE;		/* mandatory */
	info.d_media_blocks = adp->total_secs;
	info.d_secpertrack = adp->sectors;		/* optional */
	info.d_nheads = adp->heads;
	info.d_ncylinders = adp->total_secs/(adp->heads*adp->sectors);
	info.d_secpercyl = adp->sectors * adp->heads;
	info.d_serialno = atadev->param.serial;

	device_add_child(dev, "subdisk", device_get_unit(dev));
	bus_generic_attach(dev);

	/* announce we are here */
	ad_describe(dev);

	disk_setdiskinfo(&adp->disk, &info);
	return 0;
}
/*
 * Hand back the resource list kept in the child's devinfo (ivars).
 */
static struct resource_list *
chipc_get_resource_list(device_t dev, device_t child)
{
	struct chipc_devinfo *info;

	info = device_get_ivars(child);
	return (&info->resources);
}