/*
 * Detach the Zynq devcfg device: detach any children, remove the
 * /dev/devcfg0 control node, tear down the interrupt, and release
 * the bus resources acquired at attach time.
 */
static int
zy7_devcfg_detach(device_t dev)
{
	struct zy7_devcfg_softc *softc;

	softc = device_get_softc(dev);

	if (device_is_attached(dev))
		bus_generic_detach(dev);

	/* Remove /dev/devcfg0, if it was created. */
	if (softc->sc_ctl_dev != NULL)
		destroy_dev(softc->sc_ctl_dev);

	/* Tear down the interrupt handler, then release the IRQ. */
	if (softc->irq_res != NULL) {
		if (softc->intrhandle)
			bus_teardown_intr(dev, softc->irq_res,
			    softc->intrhandle);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(softc->irq_res), softc->irq_res);
	}

	/* Release the register window. */
	if (softc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(softc->mem_res), softc->mem_res);

	zy7_devcfg_softc_p = NULL;

	DEVCFG_SC_LOCK_DESTROY(softc);

	return (0);
}
/*
 * Detach the SATA controller: detach children, then release the
 * memory window and the interrupt resources, clearing the softc
 * pointers as each resource is given back.
 */
static int
sata_detach(device_t dev)
{
	struct sata_softc *ctlr = device_get_softc(dev);

	if (device_is_attached(dev))
		bus_generic_detach(dev);

	if (ctlr->sc_mem_res != NULL) {
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(ctlr->sc_mem_res), ctlr->sc_mem_res);
		ctlr->sc_mem_res = NULL;
	}

	if (ctlr->sc_irq_res != NULL) {
		bus_teardown_intr(dev, ctlr->sc_irq_res, ctlr->sc_irq_cookiep);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(ctlr->sc_irq_res), ctlr->sc_irq_res);
		ctlr->sc_irq_res = NULL;
	}

	return (0);
}
/*
 * Detach the ig4 I2C controller: detach and delete the smbus child,
 * tear down the interrupt, then quiesce the hardware while holding
 * both the call lock and the I/O lock.
 */
int
ig4iic_detach(ig4iic_softc_t *sc)
{
	int rc;

	if (device_is_attached(sc->dev)) {
		rc = bus_generic_detach(sc->dev);
		if (rc != 0)
			return (rc);
	}
	if (sc->smb != NULL)
		device_delete_child(sc->dev, sc->smb);
	if (sc->intr_handle != NULL)
		bus_teardown_intr(sc->dev, sc->intr_res, sc->intr_handle);

	/* Block callers and the interrupt path while shutting down. */
	sx_xlock(&sc->call_lock);
	mtx_lock(&sc->io_lock);

	sc->smb = NULL;
	sc->intr_handle = NULL;

	/* Mask and clear interrupts, then disable the controller. */
	reg_write(sc, IG4_REG_INTR_MASK, 0);
	reg_read(sc, IG4_REG_CLR_INTR);
	set_controller(sc, 0);

	mtx_unlock(&sc->io_lock);
	sx_xunlock(&sc->call_lock);
	return (0);
}
/**
 * bhnd(4) bus DEVICE_SUSPEND() helper.
 *
 * Suspends every child via BUS_SUSPEND_CHILD(), walking the children
 * in reverse probe order.  Should any child fail to suspend, every
 * child suspended so far is resumed again via BUS_RESUME_CHILD() and
 * the error is returned to the caller.
 */
int
bhnd_generic_suspend(device_t dev)
{
	device_t	*children;
	int		 count, error, i;

	if (!device_is_attached(dev))
		return (EBUSY);

	error = device_get_children(dev, &children, &count);
	if (error)
		return (error);

	/* Walk the children opposite to attach order. */
	qsort(children, count, sizeof(*children),
	    compare_descending_probe_order);

	for (i = 0; i < count; i++) {
		error = BUS_SUSPEND_CHILD(device_get_parent(children[i]),
		    children[i]);
		if (error) {
			/* Roll back: resume everything already suspended. */
			for (int j = 0; j < i; j++) {
				BUS_RESUME_CHILD(
				    device_get_parent(children[j]),
				    children[j]);
			}
			break;
		}
	}

	free(children, M_TEMP);
	return (error);
}
/*
 * Detach the virtio block device: mark the softc as detaching and
 * stop the device under the lock, drain the interrupt taskqueue and
 * any pending requests, then destroy the disk, the scatter/gather
 * list, and finally the lock itself.
 */
static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *blk = device_get_softc(dev);

	VTBLK_LOCK(blk);
	blk->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(blk);
	VTBLK_UNLOCK(blk);

	if (blk->vtblk_tq != NULL) {
		taskqueue_drain(blk->vtblk_tq, &blk->vtblk_intr_task);
		taskqueue_free(blk->vtblk_tq);
		blk->vtblk_tq = NULL;
	}

	vtblk_drain(blk);

	if (blk->vtblk_disk != NULL) {
		disk_destroy(blk->vtblk_disk);
		blk->vtblk_disk = NULL;
	}
	if (blk->vtblk_sglist != NULL) {
		sglist_free(blk->vtblk_sglist);
		blk->vtblk_sglist = NULL;
	}

	VTBLK_LOCK_DESTROY(blk);
	return (0);
}
/**
 * bhnd(4) bus DEVICE_SHUTDOWN() helper.
 *
 * Shuts down every child via device_shutdown(), walking the children
 * in reverse probe order and stopping at (and returning) the first
 * error encountered.
 */
int
bhnd_generic_shutdown(device_t dev)
{
	device_t	*children;
	int		 count, error, i;

	if (!device_is_attached(dev))
		return (EBUSY);

	error = device_get_children(dev, &children, &count);
	if (error)
		return (error);

	/* Walk the children opposite to attach order. */
	qsort(children, count, sizeof(*children),
	    compare_descending_probe_order);

	for (i = 0; i < count; i++) {
		/* Terminate on first error */
		error = device_shutdown(children[i]);
		if (error)
			break;
	}

	free(children, M_TEMP);
	return (error);
}
/**
 * bhnd(4) bus DEVICE_RESUME() helper.
 *
 * Resumes every child via BUS_RESUME_CHILD(), walking the children
 * in probe order and stopping at (and returning) the first error
 * encountered.
 */
int
bhnd_generic_resume(device_t dev)
{
	device_t	*children;
	int		 count, error, i;

	if (!device_is_attached(dev))
		return (EBUSY);

	error = device_get_children(dev, &children, &count);
	if (error)
		return (error);

	qsort(children, count, sizeof(*children),
	    compare_ascending_probe_order);

	for (i = 0; i < count; i++) {
		/* Terminate on first error */
		error = BUS_RESUME_CHILD(device_get_parent(children[i]),
		    children[i]);
		if (error)
			break;
	}

	free(children, M_TEMP);
	return (error);
}
/*
 * Detach the Zynq EHCI controller, unwinding attach step by step:
 * controller teardown, child bus removal, interrupt and register
 * resources, and finally the USB DMA memory.
 */
static int
zy7_ehci_detach(device_t dev)
{
	ehci_softc_t *esc = device_get_softc(dev);

	esc->sc_flags &= ~EHCI_SCFLG_DONEINIT;

	if (device_is_attached(dev))
		bus_generic_detach(dev);

	/*
	 * Only call ehci_detach() if ehci_init() ran, which only
	 * happens after a successful bus_setup_intr().
	 */
	if (esc->sc_irq_res && esc->sc_intr_hdl)
		ehci_detach(esc);

	if (esc->sc_bus.bdev) {
		device_detach(esc->sc_bus.bdev);
		device_delete_child(dev, esc->sc_bus.bdev);
	}

	if (esc->sc_irq_res) {
		if (esc->sc_intr_hdl != NULL)
			bus_teardown_intr(dev, esc->sc_irq_res,
			    esc->sc_intr_hdl);
		bus_release_resource(dev, SYS_RES_IRQ,
		    rman_get_rid(esc->sc_irq_res), esc->sc_irq_res);
	}

	if (esc->sc_io_res)
		bus_release_resource(dev, SYS_RES_MEMORY,
		    rman_get_rid(esc->sc_io_res), esc->sc_io_res);

	usb_bus_mem_free_all(&esc->sc_bus, &ehci_iterate_hw_softc);

	return (0);
}
/*
 * Detach the virtio balloon device: signal the balloon kthread to
 * exit and wait for it, deflate the balloon, then free the
 * page-frame array and destroy the lock.
 */
static int
vtballoon_detach(device_t dev)
{
	struct vtballoon_softc *balloon = device_get_softc(dev);

	if (balloon->vtballoon_td != NULL) {
		/* Ask the worker thread to exit and sleep until it does. */
		VTBALLOON_LOCK(balloon);
		balloon->vtballoon_flags |= VTBALLOON_FLAG_DETACH;
		wakeup_one(balloon);
		msleep(balloon->vtballoon_td, VTBALLOON_MTX(balloon), 0,
		    "vtbdth", 0);
		VTBALLOON_UNLOCK(balloon);
		balloon->vtballoon_td = NULL;
	}

	if (device_is_attached(dev)) {
		vtballoon_pop(balloon);
		vtballoon_stop(balloon);
	}

	if (balloon->vtballoon_page_frames != NULL) {
		free(balloon->vtballoon_page_frames, M_DEVBUF);
		balloon->vtballoon_page_frames = NULL;
	}

	VTBALLOON_LOCK_DESTROY(balloon);
	return (0);
}
/*
 * Detach the ISA LANCE adapter: stop the chip and tear down the
 * interrupt under the serializer, detach the interface, then release
 * the IRQ/DRQ/I/O resources and destroy the DMA maps and tags.
 */
static int
le_isa_detach(device_t dev)
{
	struct le_isa_softc *isc = device_get_softc(dev);
	struct lance_softc *lsc = &isc->sc_am7990.lsc;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(lsc->ifp->if_serializer);
		lance_stop(lsc);
		bus_teardown_intr(dev, isc->sc_ires, isc->sc_ih);
		lwkt_serialize_exit(lsc->ifp->if_serializer);
		am7990_detach(&isc->sc_am7990);
	}

	if (isc->sc_ires)
		bus_release_resource(dev, SYS_RES_IRQ, isc->sc_irid,
		    isc->sc_ires);
	if (isc->sc_dres)
		bus_release_resource(dev, SYS_RES_DRQ, isc->sc_drid,
		    isc->sc_dres);
	if (isc->sc_rres)
		bus_release_resource(dev, SYS_RES_IOPORT, isc->sc_rrid,
		    isc->sc_rres);

	/* Unload and free the DMA'able memory, then the tags. */
	if (isc->sc_dmam) {
		bus_dmamap_unload(isc->sc_dmat, isc->sc_dmam);
		bus_dmamem_free(isc->sc_dmat, lsc->sc_mem, isc->sc_dmam);
	}
	if (isc->sc_dmat)
		bus_dma_tag_destroy(isc->sc_dmat);
	if (isc->sc_pdmat)
		bus_dma_tag_destroy(isc->sc_pdmat);

	return (0);
}
static int afd_open(struct disk *dp) { device_t dev = dp->d_drv1; struct ata_device *atadev = device_get_softc(dev); struct afd_softc *fdp = device_get_ivars(dev); if (!fdp) return ENXIO; if (!device_is_attached(dev)) return EBUSY; afd_test_ready(dev); afd_prevent_allow(dev, 1); if (afd_sense(dev)) device_printf(dev, "sense media type failed\n"); atadev->flags &= ~ATA_D_MEDIA_CHANGED; if (!fdp->mediasize) return ENXIO; fdp->disk->d_sectorsize = fdp->sectorsize; fdp->disk->d_mediasize = fdp->mediasize; fdp->disk->d_fwsectors = fdp->sectors; fdp->disk->d_fwheads = fdp->heads; return 0; }
/*
 * acpi_dock namespace-walk callback: detach and then eject a device
 * that depends on the given dock (identified via _EJD).
 */
static ACPI_STATUS
acpi_dock_eject_child(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	device_t dock_dev, child;
	ACPI_HANDLE dock_handle;

	dock_dev = *(device_t *)context;
	dock_handle = acpi_get_handle(dock_dev);

	/* Only act on devices that list this dock as their EJD. */
	if (!acpi_dock_is_ejd_device(dock_handle, handle))
		goto out;

	ACPI_VPRINT(dock_dev, acpi_device_get_parent_softc(dock_dev),
	    "ejecting device for %s\n", acpi_name(handle));

	/* Detach the newbus device, if one is attached. */
	child = acpi_get_device(handle);
	if (child != NULL && device_is_attached(child)) {
		mtx_lock(&Giant);
		device_detach(child);
		mtx_unlock(&Giant);
	}

	/* Ask the firmware to perform the eject. */
	acpi_SetInteger(handle, "_EJ0", 0);
out:
	return (AE_OK);
}
/*
 * Detach the virtio block device (DragonFly): mark the softc as
 * detaching and stop the device under the serializer, drain pending
 * requests, then destroy the disk and the scatter/gather list.
 */
static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *blk = device_get_softc(dev);

	lwkt_serialize_enter(&blk->vtblk_slz);
	blk->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(blk);
	lwkt_serialize_exit(&blk->vtblk_slz);

	vtblk_drain(blk);

	if (blk->cdev != NULL) {
		disk_destroy(&blk->vtblk_disk);
		blk->cdev = NULL;
	}
	if (blk->vtblk_sglist != NULL) {
		sglist_free(blk->vtblk_sglist);
		blk->vtblk_sglist = NULL;
	}

	return (0);
}
/*
 * Initialize the NVS protocol with the Hyper-V host.
 *
 * On re-initialization (device already attached), the previously
 * negotiated NVS/NDIS versions are reused unchanged.  On first
 * initialization, each entry of hn_nvs_version[] is tried in order
 * until one succeeds, and the NDIS version is derived from the
 * accepted NVS version.  Returns 0 on success, or an errno value.
 */
static int
hn_nvs_init(struct hn_softc *sc)
{
	int i, error;

	if (device_is_attached(sc->hn_dev)) {
		/*
		 * NVS version and NDIS version MUST NOT be changed.
		 */
		if (bootverbose) {
			if_printf(sc->hn_ifp, "reinit NVS version 0x%x, "
			    "NDIS version %u.%u\n", sc->hn_nvs_ver,
			    HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver),
			    HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver));
		}

		error = hn_nvs_doinit(sc, sc->hn_nvs_ver);
		if (error) {
			if_printf(sc->hn_ifp, "reinit NVS version 0x%x "
			    "failed: %d\n", sc->hn_nvs_ver, error);
			return (error);
		}
		goto done;
	}

	/*
	 * Find the supported NVS version and set NDIS version accordingly.
	 */
	for (i = 0; i < nitems(hn_nvs_version); ++i) {
		error = hn_nvs_doinit(sc, hn_nvs_version[i]);
		if (!error) {
			sc->hn_nvs_ver = hn_nvs_version[i];

			/* Set NDIS version according to NVS version. */
			sc->hn_ndis_ver = HN_NDIS_VERSION_6_30;
			if (sc->hn_nvs_ver <= HN_NVS_VERSION_4)
				sc->hn_ndis_ver = HN_NDIS_VERSION_6_1;

			if (bootverbose) {
				if_printf(sc->hn_ifp, "NVS version 0x%x, "
				    "NDIS version %u.%u\n", sc->hn_nvs_ver,
				    HN_NDIS_VERSION_MAJOR(sc->hn_ndis_ver),
				    HN_NDIS_VERSION_MINOR(sc->hn_ndis_ver));
			}
			goto done;
		}
	}
	if_printf(sc->hn_ifp, "no NVS available\n");
	return (ENXIO);

done:
	/* NVS 5+ supports carrying a hash value per packet. */
	if (sc->hn_nvs_ver >= HN_NVS_VERSION_5)
		sc->hn_caps |= HN_CAP_HASHVAL;
	return (0);
}
/*
 * Detach the korina ethernet adapter: stop the interface and detach
 * it from the network stack, detach bus children, tear down all four
 * interrupt handlers, release the IRQ and memory resources, and free
 * the ifnet, DMA state, and mutex.
 */
static int
kr_detach(device_t dev)
{
	struct kr_softc *kr = device_get_softc(dev);
	struct ifnet *ifp = kr->kr_ifp;

	KASSERT(mtx_initialized(&kr->kr_mtx), ("vr mutex not initialized"));

	/* These should only be active if attach succeeded */
	if (device_is_attached(dev)) {
		KR_LOCK(kr);
		kr->kr_detach = 1;
		kr_stop(kr);
		KR_UNLOCK(kr);
		taskqueue_drain(taskqueue_swi, &kr->kr_link_task);
		ether_ifdetach(ifp);
	}
	if (kr->kr_miibus)
		device_delete_child(dev, kr->kr_miibus);
	bus_generic_detach(dev);

	/* Tear down and release the four interrupt lines. */
	if (kr->kr_rx_intrhand)
		bus_teardown_intr(dev, kr->kr_rx_irq, kr->kr_rx_intrhand);
	if (kr->kr_rx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, kr->kr_rx_irq);
	if (kr->kr_tx_intrhand)
		bus_teardown_intr(dev, kr->kr_tx_irq, kr->kr_tx_intrhand);
	if (kr->kr_tx_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, kr->kr_tx_irq);
	if (kr->kr_rx_und_intrhand)
		bus_teardown_intr(dev, kr->kr_rx_und_irq,
		    kr->kr_rx_und_intrhand);
	if (kr->kr_rx_und_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, kr->kr_rx_und_irq);
	if (kr->kr_tx_ovr_intrhand)
		bus_teardown_intr(dev, kr->kr_tx_ovr_irq,
		    kr->kr_tx_ovr_intrhand);
	if (kr->kr_tx_ovr_irq)
		bus_release_resource(dev, SYS_RES_IRQ, 0, kr->kr_tx_ovr_irq);

	if (kr->kr_res)
		bus_release_resource(dev, SYS_RES_MEMORY, kr->kr_rid,
		    kr->kr_res);

	if (ifp)
		if_free(ifp);
	kr_dma_free(kr);
	mtx_destroy(&kr->kr_mtx);

	return (0);
}
/*
 * Retrieve the initial battery status (_BST) and information (_BIF),
 * retrying for up to ACPI_CMBAT_RETRY_MAX attempts since the embedded
 * controller may not be ready shortly after boot.  Gives up silently
 * (with a verbose-mode message) if valid data never arrives, or if the
 * battery device is detached (e.g. ejected with its dock) mid-retry.
 */
static void
acpi_cmbat_init_battery(void *arg)
{
	struct acpi_cmbat_softc *sc;
	int retry, valid;
	device_t dev;

	dev = (device_t)arg;
	sc = device_get_softc(dev);
	ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
	    "battery initialization start\n");

	/*
	 * Try repeatedly to get valid data from the battery.  Since the
	 * embedded controller isn't always ready just after boot, we may have
	 * to wait a while.
	 */
	for (retry = 0; retry < ACPI_CMBAT_RETRY_MAX;
	    retry++, AcpiOsSleep(10000)) {
		/* batteries on DOCK can be ejected w/ DOCK during retrying */
		if (!device_is_attached(dev))
			return;

		if (!acpi_BatteryIsPresent(dev))
			continue;

		/*
		 * Only query the battery if this is the first try or the
		 * specific type of info is still invalid.
		 */
		ACPI_SERIAL_BEGIN(cmbat);
		if (retry == 0 || !acpi_battery_bst_valid(&sc->bst)) {
			/* Force a fresh _BST read. */
			timespecclear(&sc->bst_lastupdated);
			acpi_cmbat_get_bst(dev);
		}
		if (retry == 0 || !acpi_battery_bif_valid(&sc->bif))
			acpi_cmbat_get_bif(dev);
		valid = acpi_battery_bst_valid(&sc->bst) &&
		    acpi_battery_bif_valid(&sc->bif);
		ACPI_SERIAL_END(cmbat);

		if (valid)
			break;
	}

	if (retry == ACPI_CMBAT_RETRY_MAX) {
		ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "battery initialization failed, giving up\n");
	} else {
		ACPI_VPRINT(dev, acpi_device_get_parent_softc(dev),
		    "battery initialization done, tried %d times\n",
		    retry + 1);
	}
}
/*------------------------------------------------------------------------* * ugen_get_iface_driver * * This function generates an USB interface description for userland. * * Returns: * 0: Success * Else: Failure *------------------------------------------------------------------------*/ static int ugen_get_iface_driver(struct usb_fifo *f, struct usb_gen_descriptor *ugd) { struct usb_device *udev = f->udev; struct usb_interface *iface; const char *ptr; const char *desc; unsigned int len; unsigned int maxlen; char buf[128]; int error; DPRINTFN(6, "\n"); if ((ugd->ugd_data == NULL) || (ugd->ugd_maxlen == 0)) { /* userland pointer should not be zero */ return (EINVAL); } iface = usbd_get_iface(udev, ugd->ugd_iface_index); if ((iface == NULL) || (iface->idesc == NULL)) { /* invalid interface index */ return (EINVAL); } /* read out device nameunit string, if any */ if ((iface->subdev != NULL) && device_is_attached(iface->subdev) && (ptr = device_get_nameunit(iface->subdev)) && (desc = device_get_desc(iface->subdev))) { /* print description */ snprintf(buf, sizeof(buf), "%s: <%s>", ptr, desc); /* range checks */ maxlen = ugd->ugd_maxlen - 1; len = strlen(buf); if (len > maxlen) len = maxlen; /* update actual length, including terminating zero */ ugd->ugd_actlen = len + 1; /* copy out interface description */ error = copyout(buf, ugd->ugd_data, ugd->ugd_actlen); } else { /* zero length string is default */ error = copyout("", ugd->ugd_data, 1); } return (error); }
/*
 * Print a human-readable description of the given virtio feature
 * bits.  Done only when booting verbosely and before the child
 * driver has attached; afterwards the features are left alone.
 */
static void
vtpci_describe_features(struct vtpci_softc *sc, const char *msg,
    uint64_t features)
{
	device_t dev, child;

	dev = sc->vtpci_dev;
	child = sc->vtpci_child_dev;

	if (!device_is_attached(child) && bootverbose != 0)
		virtio_describe(dev, msg, features,
		    sc->vtpci_child_feat_desc);
}
/*
 * Suspend handler for the ACPI-aware PCI bus (DragonFly).  Saves each
 * child's config space, suspends all children, then places attached
 * type-0 children into D3 (or whatever state ACPI recommends).
 */
int
acpi_pci_suspend(device_t dev)
{
	int dstate, error, idx, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	for (idx = 0; idx < numdevs; idx++) {
		child = devlist[idx];
		dinfo = (struct pci_devinfo *)device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/*
	 * Suspend devices before potentially powering them down.
	 */
	error = bus_generic_suspend(dev);
	if (error) {
		kfree(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	if (acpi_dev != NULL) {
		for (idx = 0; idx < numdevs; idx++) {
			child = devlist[idx];
			dinfo = (struct pci_devinfo *)device_get_ivars(child);
			if (device_is_attached(child) &&
			    dinfo->cfg.hdrtype == 0) {
				dstate = PCI_POWERSTATE_D3;
				ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
				pci_set_powerstate(child, dstate);
			}
		}
	}
	kfree(devlist, M_TEMP);
	return (0);
}
/*
 * Probe for ICH SpeedStep support.  Defers (returns ENXIO) when
 * acpi_perf is attached and actively managing frequencies, or when
 * the Enhanced SpeedStep (est) driver is present.
 */
static int
ichss_probe(device_t dev)
{
	device_t est_dev, perf_dev;
	int error, type;

	/*
	 * If the ACPI perf driver has attached and is not just offering
	 * info, let it manage things.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	if (perf_dev != NULL && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0 && (type & CPUFREQ_FLAG_INFO_ONLY) == 0)
			return (ENXIO);
	}

	/* If Enhanced SpeedStep is available, don't attach. */
	est_dev = device_find_child(device_get_parent(dev), "est", -1);
	if (est_dev != NULL && device_is_attached(est_dev))
		return (ENXIO);

	device_set_desc(dev, "SpeedStep ICH");
	return (-1000);
}
/*
 * Detach the rtw(4) PCI device: run the generic rtw detach if attach
 * succeeded, then release the register resource.
 */
static int
rtw_pci_detach(device_t dev)
{
	struct rtw_softc *sc = device_get_softc(dev);
	struct rtw_regs *regs = &sc->sc_regs;

	if (device_is_attached(dev))
		rtw_detach(dev);

	if (regs->r_res != NULL)
		bus_release_resource(dev, regs->r_type, regs->r_rid,
		    regs->r_res);

	return (0);
}
/*
 * Open the ATA disk device node.  Fails with ENXIO when the softc or
 * its device node is gone, and with EBUSY when the device has been
 * detached.
 */
static int
ad_open(struct dev_open_args *ap)
{
	device_t dev = ap->a_head.a_dev->si_drv1;
	struct ad_softc *adp = device_get_ivars(dev);

	if (adp == NULL || adp->cdev == NULL)
		return (ENXIO);
	if (!device_is_attached(dev))
		return (EBUSY);
#if 0
	/* XXX TGEN Probably useless, queue will be failed on detach. */
	adp->ad_flags &= AD_DISK_OPEN;
#endif /* 0 */
	return (0);
}
/** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); mtx_lock(&Giant); /* for newbus */ parent = device_get_parent(sc->dev); hostb = bhnd_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { mtx_unlock(&Giant); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); mtx_unlock(&Giant); return (result); }
/*
 * Detach the dTSEC ethernet controller: take down and free the
 * network interface, release regular-mode queue/pool resources, free
 * the FMan MAC and ports, and destroy the softc lock.
 */
int
dtsec_detach(device_t dev)
{
	struct dtsec_softc *softc = device_get_softc(dev);
	if_t ifp = softc->sc_ifnet;

	if (device_is_attached(dev)) {
		ether_ifdetach(ifp);
		/* Shutdown interface */
		DTSEC_LOCK(softc);
		dtsec_if_deinit_locked(softc);
		DTSEC_UNLOCK(softc);
	}

	if (softc->sc_ifnet) {
		if_free(softc->sc_ifnet);
		softc->sc_ifnet = NULL;
	}

	if (softc->sc_mode == DTSEC_MODE_REGULAR) {
		/* Free RX/TX FQRs */
		dtsec_rm_fqr_rx_free(softc);
		dtsec_rm_fqr_tx_free(softc);

		/* Free frame info pool */
		dtsec_rm_fi_pool_free(softc);

		/* Free RX buffer pool */
		dtsec_rm_pool_rx_free(softc);
	}

	dtsec_fm_mac_free(softc);
	dtsec_fm_port_free_both(softc);

	/* Destroy lock */
	mtx_destroy(&softc->sc_lock);

	return (0);
}
/**
 * bhnd(4) bus DEVICE_ATTACH() helper.
 *
 * Probes and attaches every child via device_probe_and_attach(),
 * walking the children in ascending probe order.  Per-child attach
 * failures are not treated as fatal.
 */
int
bhnd_generic_attach(device_t dev)
{
	device_t	*children;
	int		 count, error, i;

	if (device_is_attached(dev))
		return (EBUSY);

	error = device_get_children(dev, &children, &count);
	if (error)
		return (error);

	qsort(children, count, sizeof(*children),
	    compare_ascending_probe_order);

	for (i = 0; i < count; i++)
		device_probe_and_attach(children[i]);

	free(children, M_TEMP);
	return (0);
}
/*
 * Detach the sbsh adapter: stop the chip and tear down the interrupt
 * under the serializer, detach the interface from the network stack,
 * then release the IRQ and memory resources.
 */
static int
sbsh_detach(device_t dev)
{
	struct sbsh_softc *sc = device_get_softc(dev);
	struct ifnet *ifp = &sc->arpcom.ac_if;

	if (device_is_attached(dev)) {
		lwkt_serialize_enter(ifp->if_serializer);
		sbsh_stop(sc);
		bus_teardown_intr(dev, sc->irq_res, sc->intr_hand);
		lwkt_serialize_exit(ifp->if_serializer);

		ether_ifdetach(ifp);
	}

	if (sc->irq_res != NULL)
		bus_release_resource(dev, SYS_RES_IRQ, 0, sc->irq_res);
	if (sc->mem_res != NULL)
		bus_release_resource(dev, SYS_RES_MEMORY, PCIR_MAPS + 4,
		    sc->mem_res);

	return (0);
}
static int afd_open(struct dev_open_args *ap) { device_t dev = ap->a_head.a_dev->si_drv1; struct ata_device *atadev = device_get_softc(dev); struct afd_softc *fdp = device_get_ivars(dev); struct disk_info info; if (!fdp) return ENXIO; if (!device_is_attached(dev)) return EBUSY; afd_test_ready(dev); afd_prevent_allow(dev, 1); if (afd_sense(dev)) device_printf(dev, "sense media type failed\n"); atadev->flags &= ~ATA_D_MEDIA_CHANGED; if (!fdp->mediasize) return ENXIO; bzero(&info, sizeof(info)); info.d_media_blksize = fdp->sectorsize; /* mandatory */ info.d_media_size = fdp->mediasize; /* (this is in bytes) */ info.d_secpertrack = fdp->sectors; /* optional */ info.d_nheads = fdp->heads; info.d_ncylinders = ((fdp->mediasize/fdp->sectorsize)/fdp->sectors)/fdp->heads; info.d_secpercyl = fdp->sectors * fdp->heads; disk_setdiskinfo(&fdp->disk, &info); return 0; }
/*
 * Resume handler for the ACPI-aware PCI bus (DragonFly).  Powers
 * each attached type-0 child back to D0 via ACPI, restores its saved
 * config space, then resumes all children generically.
 */
int
acpi_pci_resume(device_t dev)
{
	int idx, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	device_get_children(dev, &devlist, &numdevs);

	/*
	 * Set each child to D0 and restore its PCI configuration space.
	 */
	for (idx = 0; idx < numdevs; idx++) {
		child = devlist[idx];
		dinfo = (struct pci_devinfo *)device_get_ivars(child);

		/*
		 * Notify ACPI we're going to D0 but ignore the result.  If
		 * ACPI is not present, the firmware is responsible for
		 * managing device power.  Only manage type 0 devices for
		 * now.
		 */
		if (acpi_dev && device_is_attached(child) &&
		    dinfo->cfg.hdrtype == 0) {
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, NULL);
			pci_set_powerstate(child, PCI_POWERSTATE_D0);
		}

		/*
		 * Now the device is powered up, restore its config space.
		 */
		pci_cfg_restore(child, dinfo);
	}
	kfree(devlist, M_TEMP);
	return (bus_generic_resume(dev));
}
/*------------------------------------------------------------------------*
 *	usb_handle_iface_request
 *
 * Forward a device-mode control request to the subdevice(s) attached
 * to the addressed interface (unicast) or to all interfaces in turn
 * (broadcast), falling back to built-in handling of SET_INTERFACE
 * and GET_INTERFACE when no subdevice claims the request.
 *
 * Returns:
 *    0: Success
 * Else: Failure
 *------------------------------------------------------------------------*/
static usb_error_t
usb_handle_iface_request(struct usb_xfer *xfer,
    void **ppdata, uint16_t *plen,
    struct usb_device_request req, uint16_t off, uint8_t state)
{
	struct usb_interface *iface;
	struct usb_interface *iface_parent;	/* parent interface */
	struct usb_device *udev = xfer->xroot->udev;
	int error;
	uint8_t iface_index;
	uint8_t temp_state;

	if ((req.bmRequestType & 0x1F) == UT_INTERFACE) {
		iface_index = req.wIndex[0];	/* unicast */
	} else {
		iface_index = 0;	/* broadcast */
	}

	/*
	 * We need to protect against other threads doing probe and
	 * attach:
	 */
	USB_XFER_UNLOCK(xfer);
	usbd_enum_lock(udev);

	error = ENXIO;

tr_repeat:
	iface = usbd_get_iface(udev, iface_index);
	if ((iface == NULL) ||
	    (iface->idesc == NULL)) {
		/* end of interfaces non-existing interface */
		goto tr_stalled;
	}
	/* set initial state */
	temp_state = state;

	/* forward request to interface, if any */
	if ((error != 0) &&
	    (error != ENOTTY) &&
	    (iface->subdev != NULL) &&
	    device_is_attached(iface->subdev)) {
#if 0
		DEVMETHOD(usb_handle_request, NULL);	/* dummy */
#endif
		error = USB_HANDLE_REQUEST(iface->subdev,
		    &req, ppdata, plen,
		    off, &temp_state);
	}
	iface_parent = usbd_get_iface(udev, iface->parent_iface_index);

	if ((iface_parent == NULL) ||
	    (iface_parent->idesc == NULL)) {
		/* non-existing interface */
		iface_parent = NULL;
	}
	/* forward request to parent interface, if any */
	if ((error != 0) &&
	    (error != ENOTTY) &&
	    (iface_parent != NULL) &&
	    (iface_parent->subdev != NULL) &&
	    ((req.bmRequestType & 0x1F) == UT_INTERFACE) &&
	    (iface_parent->subdev != iface->subdev) &&
	    device_is_attached(iface_parent->subdev)) {
		error = USB_HANDLE_REQUEST(iface_parent->subdev,
		    &req, ppdata, plen, off, &temp_state);
	}
	if (error == 0) {
		/* negativly adjust pointer and length */
		*ppdata = ((uint8_t *)(*ppdata)) - off;
		*plen += off;

		if ((state == USB_HR_NOT_COMPLETE) &&
		    (temp_state == USB_HR_COMPLETE_OK))
			goto tr_short;
		else
			goto tr_valid;
	} else if (error == ENOTTY) {
		goto tr_stalled;
	}
	if ((req.bmRequestType & 0x1F) != UT_INTERFACE) {
		iface_index++;	/* iterate */
		goto tr_repeat;
	}
	if (state != USB_HR_NOT_COMPLETE) {
		/* we are complete */
		goto tr_valid;
	}
	/* No subdevice handled the request; try the built-in handlers. */
	switch (req.bmRequestType) {
	case UT_WRITE_INTERFACE:
		switch (req.bRequest) {
		case UR_SET_INTERFACE:
			/*
			 * We assume that the endpoints are the same
			 * accross the alternate settings.
			 *
			 * Reset the endpoints, because re-attaching
			 * only a part of the device is not possible.
			 */
			error = usb_check_alt_setting(udev,
			    iface, req.wValue[0]);
			if (error) {
				DPRINTF("alt setting does not exist %s\n",
				    usbd_errstr(error));
				goto tr_stalled;
			}
			error = usb_reset_iface_endpoints(udev, iface_index);
			if (error) {
				DPRINTF("alt setting failed %s\n",
				    usbd_errstr(error));
				goto tr_stalled;
			}
			/* update the current alternate setting */
			iface->alt_index = req.wValue[0];
			break;

		default:
			goto tr_stalled;
		}
		break;

	case UT_READ_INTERFACE:
		switch (req.bRequest) {
		case UR_GET_INTERFACE:
			*ppdata = &iface->alt_index;
			*plen = 1;
			break;

		default:
			goto tr_stalled;
		}
		break;

	default:
		goto tr_stalled;
	}
tr_valid:
	usbd_enum_unlock(udev);
	USB_XFER_LOCK(xfer);
	return (0);

tr_short:
	usbd_enum_unlock(udev);
	USB_XFER_LOCK(xfer);
	return (USB_ERR_SHORT_XFER);

tr_stalled:
	usbd_enum_unlock(udev);
	USB_XFER_LOCK(xfer);
	return (USB_ERR_STALLED);
}
/*
 * Probe for AMD Cool'n'Quiet 2.0 P-state support on hwpstate0.
 *
 * Prefers P-state info from acpi_perf when that driver is attached
 * with the INFO_ONLY flag (and cross-checks its P-state count against
 * the MSR limit register); otherwise falls back to reading the MSRs
 * directly.  Defers entirely (ENXIO) when acpi_perf is actively
 * managing P-state transitions itself.
 */
static int
hwpstate_probe(device_t dev)
{
	struct hwpstate_softc *sc;
	device_t perf_dev;
	uint64_t msr;
	int error, type;

	/*
	 * Only hwpstate0.
	 * It goes well with acpi_throttle.
	 */
	if (device_get_unit(dev) != 0)
		return (ENXIO);

	sc = device_get_softc(dev);
	sc->dev = dev;

	/*
	 * Check if acpi_perf has INFO only flag.
	 */
	perf_dev = device_find_child(device_get_parent(dev), "acpi_perf", -1);
	/* error doubles as a "still need MSR fallback" flag below. */
	error = TRUE;
	if (perf_dev && device_is_attached(perf_dev)) {
		error = CPUFREQ_DRV_TYPE(perf_dev, &type);
		if (error == 0) {
			if ((type & CPUFREQ_FLAG_INFO_ONLY) == 0) {
				/*
				 * If acpi_perf doesn't have INFO_ONLY flag,
				 * it will take care of pstate transitions.
				 */
				HWPSTATE_DEBUG(dev, "acpi_perf will take care of pstate transitions.\n");
				return (ENXIO);
			} else {
				/*
				 * If acpi_perf has INFO_ONLY flag,
				 * (_PCT has FFixedHW)
				 * we can get _PSS info from acpi_perf
				 * without going into ACPI.
				 */
				HWPSTATE_DEBUG(dev, "going to fetch info from acpi_perf\n");
				error = hwpstate_get_info_from_acpi_perf(dev, perf_dev);
			}
		}
	}

	if (error == 0) {
		/*
		 * Now we get _PSS info from acpi_perf without error.
		 * Let's check it.
		 */
		msr = rdmsr(MSR_AMD_10H_11H_LIMIT);
		if (sc->cfnum != 1 + AMD_10H_11H_GET_PSTATE_MAX_VAL(msr)) {
			HWPSTATE_DEBUG(dev, "msr and acpi _PSS count mismatch.\n");
			error = TRUE;
		}
	}

	/*
	 * If we cannot get info from acpi_perf,
	 * Let's get info from MSRs.
	 */
	if (error)
		error = hwpstate_get_info_from_msr(dev);
	if (error)
		return (error);

	device_set_desc(dev, "Cool`n'Quiet 2.0");
	return (0);
}