/*
 * Slicer operates on the NAND controller, so we have to find the chip.
 * Walk controller -> nandbus -> chip, then report a fixed two-slice layout.
 */
static int
rb_nand_slicer(device_t dev, struct flash_slice *slices, int *nslices)
{
	struct nand_chip *chip;
	device_t *kids;
	int nkids;

	/* Step down from the controller to the nandbus child. */
	if (device_get_children(dev, &kids, &nkids) != 0)
		panic("Slicer called on controller with no child!");
	dev = kids[0];
	free(kids, M_TEMP);

	/* Step down from the nandbus to the chip itself. */
	if (device_get_children(dev, &kids, &nkids) != 0)
		panic("Slicer called on controller with nandbus but no child!");
	dev = kids[0];
	free(kids, M_TEMP);

	chip = device_get_softc(dev);

	/* Fixed layout: a 4 MiB "boot" slice, remainder is "rootfs". */
	*nslices = 2;

	slices[0].base = 0;
	slices[0].size = 4 * 1024 * 1024;
	slices[0].label = "boot";

	slices[1].base = 4 * 1024 * 1024;
	slices[1].size = chip->ndisk->d_mediasize - slices[0].size;
	slices[1].label = "rootfs";

	return (0);
}
/*
 * Since this is not a self-enumerating bus, and since we always add
 * children in attach, we have to always delete children here.
 *
 * Returns 0 on success, or the error from bus_generic_detach() /
 * device_get_children().
 */
static int
gpiobus_detach(device_t dev)
{
	struct gpiobus_softc *sc;
	struct gpiobus_ivar *devi;
	device_t *devlist;
	int i, err, ndevs;

	sc = GPIOBUS_SOFTC(dev);
	KASSERT(mtx_initialized(&sc->sc_mtx),
	    ("gpiobus mutex not initialized"));
	/*
	 * NOTE(review): the lock is destroyed before bus_generic_detach()
	 * can still fail below; on failure the device remains attached with
	 * a destroyed mutex — confirm this ordering is intentional.
	 */
	GPIOBUS_LOCK_DESTROY(sc);

	if ((err = bus_generic_detach(dev)) != 0)
		return (err);

	if ((err = device_get_children(dev, &devlist, &ndevs)) != 0)
		return (err);
	for (i = 0; i < ndevs; i++) {
		/*
		 * BUG FIX: read the child's ivars and release its pin map
		 * BEFORE deleting it.  device_delete_child() frees the child
		 * device, so the old order (delete first, then
		 * GPIOBUS_IVAR()) read freed memory.
		 */
		devi = GPIOBUS_IVAR(devlist[i]);
		if (devi->pins) {
			free(devi->pins, M_DEVBUF);
			devi->pins = NULL;
		}
		device_delete_child(dev, devlist[i]);
	}
	free(devlist, M_TEMP);

	/* Release the bus-level pin-mapping table, if one was allocated. */
	if (sc->sc_pins_mapped) {
		free(sc->sc_pins_mapped, M_DEVBUF);
		sc->sc_pins_mapped = NULL;
	}

	return (0);
}
/*
 * Detach the ATA PCI controller: delete all child channel devices, then
 * release the interrupt and the two resource ranges acquired at attach.
 * Always returns 0.
 */
int
ata_pci_detach(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(dev);
	device_t *children;
	int nchildren, i;

	/* detach & delete all children */
	if (!device_get_children(dev, &children, &nchildren)) {
		for (i = 0; i < nchildren; i++)
			device_delete_child(dev, children[i]);
		/* device_get_children() allocated the list; caller frees it. */
		kfree(children, M_TEMP);
	}

	/* Tear down the interrupt handler before releasing its resource. */
	if (ctlr->r_irq) {
		bus_teardown_intr(dev, ctlr->r_irq, ctlr->handle);
		bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ctlr->r_irq);
		ctlr->r_irq = NULL;
	}
	/* Release the saved resource ranges; pointers are NULLed so a
	 * repeated detach is harmless. */
	if (ctlr->r_res2) {
		bus_release_resource(dev, ctlr->r_type2, ctlr->r_rid2, ctlr->r_res2);
		ctlr->r_res2 = NULL;
	}
	if (ctlr->r_res1) {
		bus_release_resource(dev, ctlr->r_type1, ctlr->r_rid1, ctlr->r_res1);
		ctlr->r_res1 = NULL;
	}
	return 0;
}
/*
 * Channel reset for ALi controllers, with a workaround for a data
 * corruption bug seen on at least the SUN Blade-100: locate the ISA
 * function on the southbridge and pulse (disable then re-enable) the
 * ATA channel tristate buffer.
 */
static void
ata_ali_reset(device_t dev)
{
	struct ata_pci_controller *ctlr = device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	device_t *kids;
	u_int8_t val;
	int nkids, i;

	ata_generic_reset(dev);

	/* Only chip revisions 0xc2/0xc3 need the tristate workaround. */
	if (ctlr->chip->chiprev != 0xc3 && ctlr->chip->chiprev != 0xc2)
		return;
	if (device_get_children(GRANDPARENT(dev), &kids, &nkids) != 0)
		return;

	for (i = 0; i < nkids; i++) {
		if (pci_get_devid(kids[i]) != ATA_ALI_1533)
			continue;
		/* Clear, then re-set, this channel's tristate bit. */
		val = pci_read_config(kids[i], 0x58, 1);
		pci_write_config(kids[i], 0x58,
		    val & ~(0x04 << ch->unit), 1);
		pci_write_config(kids[i], 0x58,
		    pci_read_config(kids[i], 0x58, 1) | (0x04 << ch->unit), 1);
		break;
	}
	free(kids, M_TEMP);
}
static int pccard_open(struct cdev *dev, int oflags, int devtype, struct thread *td) { device_t parent, child; device_t *kids; int cnt, err; struct pccard_softc *sc; sc = dev->si_drv1; if (sc->cis_open) return (EBUSY); parent = sc->dev; err = device_get_children(parent, &kids, &cnt); if (err) return err; if (cnt == 0) { free(kids, M_TEMP); sc->cis_open++; sc->cis = NULL; return (0); } child = kids[0]; free(kids, M_TEMP); sc->cis = malloc(sizeof(*sc->cis), M_TEMP, M_ZERO | M_WAITOK); err = pccard_scan_cis(parent, child, pccard_build_cis, sc->cis); if (err) { free(sc->cis, M_TEMP); sc->cis = NULL; return (err); } sc->cis_open++; return (0); }
static void hdspe_intr(void *p) { struct sc_info *sc = (struct sc_info *)p; struct sc_pcminfo *scp; device_t *devlist; int devcount, status; int i, err; snd_mtxlock(sc->lock); status = hdspe_read_1(sc, HDSPE_STATUS_REG); if (status & HDSPE_AUDIO_IRQ_PENDING) { if ((err = device_get_children(sc->dev, &devlist, &devcount)) != 0) return; for (i = 0; i < devcount; i++) { scp = device_get_ivars(devlist[i]); if (scp->ih != NULL) scp->ih(scp); } hdspe_write_1(sc, HDSPE_INTERRUPT_ACK, 0); free(devlist, M_TEMP); } snd_mtxunlock(sc->lock); }
/**
 * Helper function for implementing DEVICE_SUSPEND().
 *
 * This function can be used to implement DEVICE_SUSPEND() for bhnd(4)
 * bus implementations. It calls BUS_SUSPEND_CHILD() for each
 * of the device's children, in reverse order. If any call to
 * BUS_SUSPEND_CHILD() fails, the suspend operation is terminated and
 * any devices that were suspended are resumed immediately by calling
 * their BUS_RESUME_CHILD() methods.
 */
int
bhnd_generic_suspend(device_t dev)
{
	device_t *devs;
	int ndevs;
	int error;

	if (!device_is_attached(dev))
		return (EBUSY);

	if ((error = device_get_children(dev, &devs, &ndevs)))
		return (error);

	/* Suspend in the reverse of attach order */
	qsort(devs, ndevs, sizeof(*devs), compare_descending_probe_order);
	for (int i = 0; i < ndevs; i++) {
		device_t child = devs[i];
		error = BUS_SUSPEND_CHILD(device_get_parent(child), child);

		/* On error, resume suspended devices and then terminate */
		if (error) {
			/* Roll back only the children suspended so far
			 * (indices [0, i)); errors from resume are ignored
			 * here since we are already failing. */
			for (int j = 0; j < i; j++) {
				BUS_RESUME_CHILD(device_get_parent(devs[j]),
				    devs[j]);
			}

			goto cleanup;
		}
	}

cleanup:
	/* Common exit: release the child list; `error` is 0 on success or
	 * the first suspend failure. */
	free(devs, M_TEMP);
	return (error);
}
/*
 * Tear down every function on the cardbus card, then power down the
 * socket.  Returns ENOENT if there are no children (or the child list
 * cannot be obtained), 0 otherwise.
 */
static int
cardbus_detach_card(device_t cbdev)
{
	struct cardbus_devinfo *dinfo;
	device_t *kids;
	int nkids, i;

	if (device_get_children(cbdev, &kids, &nkids) != 0)
		return (ENOENT);
	if (nkids == 0) {
		free(kids, M_TEMP);
		return (ENOENT);
	}

	for (i = 0; i < nkids; i++) {
		dinfo = device_get_ivars(kids[i]);
		/* Sanity-check the devinfo back-pointer before destroying. */
		if (dinfo->pci.cfg.dev != kids[i])
			device_printf(cbdev, "devinfo dev mismatch\n");
		cardbus_device_destroy(dinfo);
		pci_delete_child(cbdev, kids[i]);
	}

	POWER_DISABLE_SOCKET(device_get_parent(cbdev), cbdev);
	free(kids, M_TEMP);
	return (0);
}
/**
 * Helper function for implementing DEVICE_SHUTDOWN().
 *
 * This function can be used to implement DEVICE_SHUTDOWN() for bhnd(4)
 * bus implementations. It calls device_shutdown() for each of the
 * device's children, in reverse of attach order, stopping at the first
 * failure and returning that error.
 */
int
bhnd_generic_shutdown(device_t dev)
{
	device_t *children;
	int nchildren;
	int error, i;

	if (!device_is_attached(dev))
		return (EBUSY);

	error = device_get_children(dev, &children, &nchildren);
	if (error != 0)
		return (error);

	/* Shut down in the reverse of attach order. */
	qsort(children, nchildren, sizeof(*children),
	    compare_descending_probe_order);

	/* Stop at the first child that fails to shut down. */
	for (i = 0; i < nchildren && error == 0; i++)
		error = device_shutdown(children[i]);

	free(children, M_TEMP);
	return (error);
}
/**
 * Helper function for implementing DEVICE_RESUME().
 *
 * This function can be used to implement DEVICE_RESUME() for bhnd(4)
 * bus implementations. It calls BUS_RESUME_CHILD() for each of the
 * device's children, in attach (ascending probe) order, stopping at
 * the first failure and returning that error.
 */
int
bhnd_generic_resume(device_t dev)
{
	device_t *children;
	int nchildren;
	int error, i;

	if (!device_is_attached(dev))
		return (EBUSY);

	error = device_get_children(dev, &children, &nchildren);
	if (error != 0)
		return (error);

	/* Resume in attach order. */
	qsort(children, nchildren, sizeof(*children),
	    compare_ascending_probe_order);

	/* Stop at the first child that fails to resume. */
	for (i = 0; i < nchildren && error == 0; i++)
		error = BUS_RESUME_CHILD(device_get_parent(children[i]),
		    children[i]);

	free(children, M_TEMP);
	return (error);
}
/*
 * Detach the MVS controller: delete all child devices, then free the
 * interrupt, the resource manager state, the memory resource, and the
 * controller mutex.  Always returns 0.
 */
static int
mvs_detach(device_t dev)
{
	struct mvs_controller *ctlr = device_get_softc(dev);
	device_t *children;
	int nchildren, i;

	/* Detach & delete all children */
	if (!device_get_children(dev, &children, &nchildren)) {
		for (i = 0; i < nchildren; i++)
			device_delete_child(dev, children[i]);
		free(children, M_TEMP);
	}

	/* Free interrupt.  Tear down the handler before releasing the IRQ. */
	if (ctlr->irq.r_irq) {
		bus_teardown_intr(dev, ctlr->irq.r_irq, ctlr->irq.handle);
		bus_release_resource(dev, SYS_RES_IRQ, ctlr->irq.r_irq_rid,
		    ctlr->irq.r_irq);
	}
	/* Free memory.  rman state first, then the backing resource. */
	rman_fini(&ctlr->sc_iomem);
	if (ctlr->r_mem)
		bus_release_resource(dev, SYS_RES_MEMORY, ctlr->r_rid,
		    ctlr->r_mem);
	mtx_destroy(&ctlr->mtx);
	return (0);
}
/**
 * @brief Return child of bus whose phandle is node
 *
 * A direct child of @p bus is returned if its phandle in the OFW tree
 * is @p node.  Otherwise, NULL is returned.  The search is not
 * recursive and only direct children are considered.
 *
 * @param bus  The bus to examine
 * @param node The phandle_t to look for.
 */
device_t
ofw_bus_find_child_device_by_phandle(device_t bus, phandle_t node)
{
	device_t *kids;
	device_t match;
	int nkids, i;

	/* -1 is the flag value for "no node"; nothing can match it. */
	if (node == -1)
		return (NULL);

	/*
	 * Query the parent directly rather than going through
	 * ofw_bus_get, since we already know the parent.
	 */
	if (device_get_children(bus, &kids, &nkids) != 0)
		return (NULL);

	match = NULL;
	for (i = 0; i < nkids && match == NULL; i++) {
		if (OFW_BUS_GET_NODE(bus, kids[i]) == node)
			match = kids[i];
	}

	free(kids, M_TEMP);
	return (match);
}
/*
 * Detach the mfi(4) PCI adapter.  Refuses with EBUSY while the control
 * device is open; otherwise marks the softc as detaching, deletes all
 * children, and releases driver and PCI resources.
 */
static int
mfi_pci_detach(device_t dev)
{
	struct mfi_softc *sc;
	int error, devcount, i;
	device_t *devlist;

	sc = device_get_softc(dev);

	/* Take the config lock, then the I/O lock, to check openness and
	 * publish the detaching flag atomically with respect to I/O. */
	sx_xlock(&sc->mfi_config_lock);
	mtx_lock(&sc->mfi_io_lock);
	if ((sc->mfi_flags & MFI_FLAGS_OPEN) != 0) {
		/* Control device still open; unwind locks in reverse order. */
		mtx_unlock(&sc->mfi_io_lock);
		sx_xunlock(&sc->mfi_config_lock);
		return (EBUSY);
	}
	sc->mfi_detaching = 1;
	mtx_unlock(&sc->mfi_io_lock);

	/* Delete children while still holding the config lock. */
	if ((error = device_get_children(sc->mfi_dev,
	    &devlist, &devcount)) != 0) {
		sx_xunlock(&sc->mfi_config_lock);
		return error;
	}
	for (i = 0; i < devcount; i++)
		device_delete_child(sc->mfi_dev, devlist[i]);
	free(devlist, M_TEMP);

	sx_xunlock(&sc->mfi_config_lock);

	/* Quiesce the firmware, then free driver and PCI resources. */
	EVENTHANDLER_DEREGISTER(shutdown_final, sc->mfi_eh);

	mfi_shutdown(sc);
	mfi_free(sc);
	mfi_pci_free(sc);
	return (0);
}
/*
 * Probe an ata channel on the cbus controller: determine this channel's
 * unit number by position among the parent's children, wire up the
 * channel's register resource vectors, and hand off to ata_probe().
 */
static int
ata_cbuschannel_probe(device_t dev)
{
	struct ata_cbus_controller *ctlr =
	    device_get_softc(device_get_parent(dev));
	struct ata_channel *ch = device_get_softc(dev);
	device_t *children;
	int count, i;

	/*
	 * Find channel number on this controller.  BUG FIX: the old code
	 * ignored the return value of device_get_children(); on failure
	 * `children` was uninitialized, yet was dereferenced and freed.
	 */
	if (device_get_children(device_get_parent(dev),
	    &children, &count) == 0) {
		for (i = 0; i < count; i++) {
			if (children[i] == dev)
				ch->unit = i;
		}
		free(children, M_TEMP);
	}

	/* Setup the resource vectors: data/command registers at 2-byte
	 * strides in the I/O range, control register in its own range. */
	for (i = ATA_DATA; i <= ATA_COMMAND; i++) {
		ch->r_io[i].res = ctlr->io;
		ch->r_io[i].offset = i << 1;
	}
	ch->r_io[ATA_CONTROL].res = ctlr->ctlio;
	ch->r_io[ATA_CONTROL].offset = 0;
	ch->r_io[ATA_IDX_ADDR].res = ctlr->io;
	ata_default_registers(dev);

	/* Initialize softc for this channel; cbus is a 16-bit bus. */
	ch->flags |= ATA_USE_16BIT;
	ata_generic_hw(dev);
	return ata_probe(dev);
}
/*
 * Scan the siblings of this device for a known VIA southbridge and, if
 * its config register 0x76 is not already programmed to the expected
 * value, rewrite registers 0x75/0x76 to work around a data corruption
 * bug.  Only the first matching southbridge is touched.
 */
static void
ata_via_southbridge_fixup(device_t dev)
{
	device_t *kids;
	u_int32_t devid;
	u_int8_t reg76;
	int nkids, i;

	if (device_get_children(device_get_parent(dev), &kids, &nkids))
		return;

	for (i = 0; i < nkids; i++) {
		devid = pci_get_devid(kids[i]);
		if (devid != ATA_VIA8363 && devid != ATA_VIA8371 &&
		    devid != ATA_VIA8662 && devid != ATA_VIA8361)
			continue;

		reg76 = pci_read_config(kids[i], 0x76, 1);
		if ((reg76 & 0xf0) != 0xd0) {
			device_printf(dev,
			    "Correcting VIA config for southbridge data corruption bug\n");
			pci_write_config(kids[i], 0x75, 0x80, 1);
			pci_write_config(kids[i], 0x76,
			    (reg76 & 0x0f) | 0xd0, 1);
		}
		break;
	}
	free(kids, M_TEMP);
}
/*
 * Detach an ata channel: stall the request queue, delete all child
 * devices, then release the interrupt and destroy the channel mutexes.
 * Returns ENXIO if the channel has no IRQ (nothing to detach).
 */
int
ata_detach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	device_t *children;
	int nchildren, i;

	/* check that we have a valid channel to detach */
	if (!ch->r_irq)
		return ENXIO;

	/* grap the channel lock so no new requests gets launched */
	mtx_lock(&ch->state_mtx);
	ch->state |= ATA_STALL_QUEUE;
	mtx_unlock(&ch->state_mtx);

	/* detach & delete all children */
	if (!device_get_children(dev, &children, &nchildren)) {
		for (i = 0; i < nchildren; i++)
			/* list entries may be NULL; skip those */
			if (children[i])
				device_delete_child(dev, children[i]);
		free(children, M_TEMP);
	}

	/* release resources: handler first, then the IRQ itself */
	bus_teardown_intr(dev, ch->r_irq, ch->ih);
	bus_release_resource(dev, SYS_RES_IRQ, ATA_IRQ_RID, ch->r_irq);
	ch->r_irq = NULL;
	mtx_destroy(&ch->state_mtx);
	mtx_destroy(&ch->queue_mtx);
	return 0;
}
/*
 * ACPI namespace walk callback: resolve the _ADR of the handle into a
 * PCI slot/function, find the matching child of the PCI bus passed in
 * `context`, record the ACPI handle in its devinfo, and update the
 * device.  Always returns AE_OK so the walk continues.
 */
static ACPI_STATUS
acpi_pci_save_handle(ACPI_HANDLE handle, UINT32 level, void *context,
    void **status)
{
	struct acpi_pci_devinfo *dinfo;
	device_t *kids;
	int nkids, i, func, slot;
	UINT32 address;

	ACPI_FUNCTION_TRACE((char *)(uintptr_t)__func__);

	/* No _ADR means we cannot map this handle to a PCI device. */
	if (ACPI_FAILURE(acpi_GetInteger(handle, "_ADR", &address)))
		return_ACPI_STATUS (AE_OK);
	slot = ACPI_ADR_PCI_SLOT(address);
	func = ACPI_ADR_PCI_FUNC(address);

	if (device_get_children((device_t)context, &kids, &nkids) != 0)
		return_ACPI_STATUS (AE_OK);

	for (i = 0; i < nkids; i++) {
		dinfo = device_get_ivars(kids[i]);
		if (dinfo->ap_dinfo.cfg.func != func ||
		    dinfo->ap_dinfo.cfg.slot != slot)
			continue;
		dinfo->ap_handle = handle;
		acpi_pci_update_device(handle, kids[i]);
		break;
	}
	free(kids, M_TEMP);
	return_ACPI_STATUS (AE_OK);
}
/*
 * Generic version of iicbus_transfer that calls the appropriate
 * routines to accomplish this.  See note above about acceptable
 * buffer addresses.
 *
 * Walks the msgs[] array, issuing a (repeated) start, a read or write,
 * and a stop for each message as directed by its flags.  Returns 0 or
 * the first IIC error encountered.
 */
int
iicbus_transfer_gen(device_t dev, struct iic_msg *msgs, uint32_t nmsgs)
{
	int i, error, lenread, lenwrote, nkid, rpstart, addr;
	device_t *children, bus;
	bool nostop;

	/* The transfer is performed on the bus's single child device. */
	if ((error = device_get_children(dev, &children, &nkid)) != 0)
		return (IIC_ERESOURCE);
	if (nkid != 1) {
		free(children, M_TEMP);
		return (IIC_ENOTSUPP);
	}
	bus = children[0];
	rpstart = 0;
	free(children, M_TEMP);
	/* When nostop is set the controller keeps the bus between
	 * messages, using repeated starts instead of stop/start pairs. */
	nostop = iicbus_get_nostop(dev);
	for (i = 0, error = 0; i < nmsgs && error == 0; i++) {
		/* Fold the read/write direction into the address LSB. */
		addr = msgs[i].slave;
		if (msgs[i].flags & IIC_M_RD)
			addr |= LSB;
		else
			addr &= ~LSB;

		/* Issue a start (or repeated start) unless the message
		 * explicitly continues the previous one. */
		if (!(msgs[i].flags & IIC_M_NOSTART)) {
			if (rpstart)
				error = iicbus_repeated_start(bus, addr, 0);
			else
				error = iicbus_start(bus, addr, 0);
		}

		if (error != 0)
			break;

		if (msgs[i].flags & IIC_M_RD)
			error = iicbus_read(bus, msgs[i].buf, msgs[i].len,
			    &lenread, IIC_LAST_READ, 0);
		else
			error = iicbus_write(bus, msgs[i].buf, msgs[i].len,
			    &lenwrote, 0);
		if (error != 0)
			break;

		/* Decide how this message ends: keep the bus (repeated
		 * start next) or release it with a stop. */
		if ((msgs[i].flags & IIC_M_NOSTOP) != 0 ||
		    (nostop && i + 1 < nmsgs)) {
			rpstart = 1;	/* Next message gets repeated start */
		} else {
			rpstart = 0;
			iicbus_stop(bus);
		}
	}
	/* On error, release the bus unless the controller holds it. */
	if (error != 0 && !nostop)
		iicbus_stop(bus);
	return (error);
}
/*
 * Attach the GICv3 interrupt controller discovered via ACPI: count the
 * redistributor regions, perform the common gic_v3 attach, register as
 * a PIC and claim the interrupt root, then try to attach an ITS bus.
 * On any failure the common attach state is torn down via
 * gic_v3_detach().
 */
static int
gic_v3_acpi_attach(device_t dev)
{
	struct gic_v3_softc *sc;
	int err;

	sc = device_get_softc(dev);
	sc->dev = dev;
	sc->gic_bus = GIC_BUS_ACPI;

	err = gic_v3_acpi_count_regions(dev);
	if (err != 0)
		goto error;

	err = gic_v3_attach(dev);
	if (err != 0)
		goto error;

	sc->gic_pic = intr_pic_register(dev, ACPI_INTR_XREF);
	if (sc->gic_pic == NULL) {
		device_printf(dev, "could not register PIC\n");
		err = ENXIO;
		goto error;
	}

	if (intr_pic_claim_root(dev, ACPI_INTR_XREF, arm_gic_v3_intr, sc,
	    GIC_LAST_SGI - GIC_FIRST_SGI + 1) != 0) {
		err = ENXIO;
		goto error;
	}

	/*
	 * Try to register the ITS driver to this GIC. The GIC will act as
	 * a bus in that case. Failure here will not affect the main GIC
	 * functionality.
	 */
	gic_v3_acpi_bus_attach(dev);

	/* Cache the child list; treat a lookup failure as "no children". */
	if (device_get_children(dev, &sc->gic_children, &sc->gic_nchildren) !=0)
		sc->gic_nchildren = 0;

	return (0);

error:
	if (bootverbose) {
		device_printf(dev,
		    "Failed to attach. Error %d\n", err);
	}
	/* Failure so free resources */
	gic_v3_detach(dev);

	return (err);
}
/*
 * Suspend a PCI bus under ACPI: save each child's config space,
 * suspend the children via bus_generic_suspend(), then place each
 * attached type-0 child into D3 (or the power state ACPI suggests).
 * If no ACPI device exists, firmware is left to manage device power.
 */
int
acpi_pci_suspend(device_t dev)
{
	int dstate, error, i, numdevs;
	device_t acpi_dev, child, *devlist;
	struct pci_devinfo *dinfo;

	acpi_dev = devclass_get_device(devclass_find("acpi"), 0);
	/*
	 * BUG FIX: the old code ignored the return value of
	 * device_get_children(); on failure `devlist` was uninitialized
	 * yet subsequently dereferenced and freed.
	 */
	error = device_get_children(dev, &devlist, &numdevs);
	if (error != 0)
		return (error);

	/*
	 * Save the PCI configuration space for each child and set the
	 * device in the appropriate power state for this sleep state.
	 */
	for (i = 0; i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *)device_get_ivars(child);
		pci_cfg_save(child, dinfo, 0);
	}

	/*
	 * Suspend devices before potentially powering them down.
	 */
	error = bus_generic_suspend(dev);
	if (error) {
		kfree(devlist, M_TEMP);
		return (error);
	}

	/*
	 * Always set the device to D3.  If ACPI suggests a different
	 * power state, use it instead.  If ACPI is not present, the
	 * firmware is responsible for managing device power.  Skip
	 * children who aren't attached since they are powered down
	 * separately.  Only manage type 0 devices for now.
	 */
	for (i = 0; acpi_dev && i < numdevs; i++) {
		child = devlist[i];
		dinfo = (struct pci_devinfo *)device_get_ivars(child);
		if (device_is_attached(child) && dinfo->cfg.hdrtype == 0) {
			dstate = PCI_POWERSTATE_D3;
			ACPI_PWR_FOR_SLEEP(acpi_dev, child, &dstate);
			pci_set_powerstate(child, dstate);
		}
	}
	kfree(devlist, M_TEMP);
	return (0);
}
/*
 * Shut down every child of the siba bus.  Errors from individual
 * children's shutdown methods are ignored; only a failure to obtain
 * the child list is reported.
 */
static int
siba_bwn_shutdown(device_t dev)
{
	device_t *children;
	int nchildren, error, i;

	if ((error = device_get_children(dev, &children, &nchildren)) != 0)
		return (error);

	for (i = 0; i < nchildren; i++)
		device_shutdown(children[i]);

	kfree(children, M_TEMP);
	return (0);
}
/*
 * Set the ARI_EN bit in the lowest-numbered PCI function with the SR-IOV
 * capability. This bit is only writeable on the lowest-numbered PF but
 * affects all PFs on the device.
 */
static int
pci_iov_set_ari(device_t bus)
{
	device_t lowest;
	device_t *devlist;
	int i, error, devcount, lowest_func, lowest_pos, iov_pos, dev_func;
	uint16_t iov_ctl;

	/* If ARI is disabled on the downstream port there is nothing to do. */
	if (!PCIB_ARI_ENABLED(device_get_parent(bus)))
		return (0);

	error = device_get_children(bus, &devlist, &devcount);

	if (error != 0)
		return (error);

	/* Scan all children for SR-IOV capability and remember the one
	 * with the lowest function number (lowest_func/lowest_pos are
	 * only valid once `lowest` is non-NULL). */
	lowest = NULL;
	for (i = 0; i < devcount; i++) {
		if (pci_find_extcap(devlist[i], PCIZ_SRIOV, &iov_pos) == 0) {
			dev_func = pci_get_function(devlist[i]);
			if (lowest == NULL || dev_func < lowest_func) {
				lowest = devlist[i];
				lowest_func = dev_func;
				lowest_pos = iov_pos;
			}
		}
	}
	free(devlist, M_TEMP);

	/*
	 * If we called this function some device must have the SR-IOV
	 * capability.
	 */
	KASSERT(lowest != NULL,
	    ("Could not find child of %s with SR-IOV capability",
	    device_get_nameunit(bus)));

	/* Set ARI_EN and read it back to verify the write took effect. */
	iov_ctl = pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2);
	iov_ctl |= PCIM_SRIOV_ARI_EN;
	pci_write_config(lowest, lowest_pos + PCIR_SRIOV_CTL, iov_ctl, 2);
	if ((pci_read_config(lowest, lowest_pos + PCIR_SRIOV_CTL, 2) &
	    PCIM_SRIOV_ARI_EN) == 0) {
		device_printf(lowest, "failed to enable ARI\n");
		return (ENXIO);
	}
	return (0);
}
/*
 * Since this is not a self-enumerating bus, and since we always add
 * children in attach, we have to always delete children here.
 */
static int
spibus_detach(device_t dev)
{
	device_t *kids;
	int error, nkids, i;

	if ((error = bus_generic_detach(dev)) != 0)
		return (error);
	if ((error = device_get_children(dev, &kids, &nkids)) != 0)
		return (error);

	/* All children were added by us; delete every one of them. */
	for (i = 0; i < nkids; i++)
		device_delete_child(dev, kids[i]);
	free(kids, M_TEMP);

	return (0);
}
/*
 * Destroy the nandbus hanging off the given NAND controller by deleting
 * every child device.  Giant is held across the newbus operations.
 */
void
nandbus_destroy(device_t nfc)
{
	device_t *kids;
	int nkids, i;

	mtx_lock(&Giant);
	/* Detach and delete all children of the controller. */
	if (device_get_children(nfc, &kids, &nkids) == 0) {
		for (i = 0; i < nkids; i++)
			device_delete_child(nfc, kids[i]);
		free(kids, M_TEMP);
	}
	mtx_unlock(&Giant);
}
/*
 * Detach the lebuffer bus: detach children generically, then destroy
 * each child's dinfo and delete the child device.
 *
 * FIX: propagate a bus_generic_detach() failure instead of ignoring it
 * and deleting still-attached children (matches the pattern used by
 * other bus detach routines such as spibus_detach()).
 */
static int
lebuffer_detach(device_t dev)
{
	device_t *children;
	int error, i, nchildren;

	if ((error = bus_generic_detach(dev)) != 0)
		return (error);
	if (device_get_children(dev, &children, &nchildren) == 0) {
		for (i = 0; i < nchildren; i++) {
			/* Destroy the ivar state before the device itself. */
			lebuffer_destroy_dinfo(device_get_ivars(children[i]));
			device_delete_child(dev, children[i]);
		}
		free(children, M_TEMP);
	}
	return (0);
}
/** * Examine bus state and make a best effort determination of whether it's * likely safe to enable the muxed SPROM pins. * * On devices that do not use SPROM pin muxing, always returns true. * * @param sc chipc driver state. */ static bool chipc_should_enable_muxed_sprom(struct chipc_softc *sc) { device_t *devs; device_t hostb; device_t parent; int devcount; int error; bool result; /* Nothing to do? */ if (!CHIPC_QUIRK(sc, MUX_SPROM)) return (true); mtx_lock(&Giant); /* for newbus */ parent = device_get_parent(sc->dev); hostb = bhnd_find_hostb_device(parent); if ((error = device_get_children(parent, &devs, &devcount))) { mtx_unlock(&Giant); return (false); } /* Reject any active devices other than ChipCommon, or the * host bridge (if any). */ result = true; for (int i = 0; i < devcount; i++) { if (devs[i] == hostb || devs[i] == sc->dev) continue; if (!device_is_attached(devs[i])) continue; if (device_is_suspended(devs[i])) continue; /* Active device; assume SPROM is busy */ result = false; break; } free(devs, M_TEMP); mtx_unlock(&Giant); return (result); }
/*
 * Toggle the CLKRUN# control on Intel device 0x8086:0x7113 (presumably
 * the PIIX4 power-management function — TODO confirm) by flipping bit
 * 0x2000 of a 16-bit register in its I/O space.  `run` non-zero clears
 * the bit; zero sets it.  i386-only; a no-op (returning 0) elsewhere.
 * Returns ENXIO when no PCI devclass or no matching device is found.
 */
static int
clkrun_hack(int run)
{
#ifdef __i386__
	devclass_t pci_devclass;
	device_t *pci_devices, *pci_children, *busp, *childp;
	int pci_count = 0, pci_childcount = 0;
	int i, j, port;
	u_int16_t control;
	bus_space_tag_t btag;

	if ((pci_devclass = devclass_find("pci")) == NULL) {
		return ENXIO;
	}

	devclass_get_devices(pci_devclass, &pci_devices, &pci_count);

	/* Scan every PCI bus, and every child on each bus. */
	for (i = 0, busp = pci_devices; i < pci_count; i++, busp++) {
		pci_childcount = 0;
		if (device_get_children(*busp, &pci_children,
		    &pci_childcount))
			continue;
		for (j = 0, childp = pci_children;
		    j < pci_childcount; j++, childp++) {
			if (pci_get_vendor(*childp) == 0x8086 &&
			    pci_get_device(*childp) == 0x7113) {
				/* I/O port base comes from config reg 0x41,
				 * register of interest at offset 0x10. */
				port = (pci_read_config(*childp, 0x41, 1) << 8)
				    + 0x10; /* XXX */
				btag = X86_BUS_SPACE_IO;
				control = bus_space_read_2(btag, 0x0, port);
				control &= ~0x2000;
				control |= run? 0 : 0x2000;
				bus_space_write_2(btag, 0x0, port, control);
				/* Done: free both lists before returning. */
				free(pci_devices, M_TEMP);
				free(pci_children, M_TEMP);
				return 0;
			}
		}
		free(pci_children, M_TEMP);
	}

	free(pci_devices, M_TEMP);
	return ENXIO;
#else
	return 0;
#endif
}
/*
 * Detach a USB ata channel: delete all child devices, then destroy the
 * channel's spinlocks.  Always returns 0.
 */
static int
ata_usbchannel_detach(device_t dev)
{
	struct ata_channel *ch = device_get_softc(dev);
	device_t *kids;
	int nkids, i;

	/* Detach and delete every (non-NULL) child device. */
	if (device_get_children(dev, &kids, &nkids) == 0) {
		for (i = 0; i < nkids; i++) {
			if (kids[i] != NULL)
				device_delete_child(dev, kids[i]);
		}
		kfree(kids, M_TEMP);
	}

	spin_uninit(&ch->state_mtx);
	spin_uninit(&ch->queue_mtx);
	return 0;
}
/*
 * Detach the atausb device: abort all active pipes first so no
 * transfers complete mid-teardown, delete the child devices, free the
 * transfer structures, close the pipes, and destroy the lock.
 */
static int
atausb_detach(device_t dev)
{
	struct atausb_softc *sc = device_get_softc(dev);
	usbd_device_handle udev;
	device_t *children;
	int nchildren, i;

	/* signal that device is going away */
	sc->state = ATAUSB_S_DETACH;

	/* abort all the pipes in case there are active transfers */
	usbd_interface2device_handle(sc->iface, &udev);
	usbd_abort_pipe(udev->default_pipe);
	if (sc->bulkout_pipe)
		usbd_abort_pipe(sc->bulkout_pipe);
	if (sc->bulkin_pipe)
		usbd_abort_pipe(sc->bulkin_pipe);
	if (sc->bulkirq_pipe)
		usbd_abort_pipe(sc->bulkirq_pipe);

	/* detach & delete all children */
	if (!device_get_children(dev, &children, &nchildren)) {
		for (i = 0; i < nchildren; i++)
			device_delete_child(dev, children[i]);
		kfree(children, M_TEMP);
	}

	/* free the transfers */
	for (i = 0; i < ATAUSB_T_MAX; i++)
		if (sc->transfer[i])
			usbd_free_xfer(sc->transfer[i]);

	/* remove all the pipes (aborted above, now safe to close) */
	if (sc->bulkout_pipe)
		usbd_close_pipe(sc->bulkout_pipe);
	if (sc->bulkin_pipe)
		usbd_close_pipe(sc->bulkin_pipe);
	if (sc->bulkirq_pipe)
		usbd_close_pipe(sc->bulkirq_pipe);

	spin_uninit(&sc->locked_mtx);
	return 0;
}
/*
 * Resume the siba core first, then resume each child device.  Errors
 * from individual children's resume methods are ignored; a core-resume
 * or child-list failure is returned.
 */
static int
siba_bwn_resume(device_t dev)
{
	struct siba_bwn_softc *ssc = device_get_softc(dev);
	device_t *children;
	int nchildren, error, i;

	error = siba_core_resume(&ssc->ssc_siba);
	if (error != 0)
		return (error);

	if ((error = device_get_children(dev, &children, &nchildren)) != 0)
		return (error);

	for (i = 0; i < nchildren; i++)
		DEVICE_RESUME(children[i]);

	kfree(children, M_TEMP);
	return (0);
}