/*
 * Detach net80211 state on device detach.  Tear down
 * all vap's and reclaim all common state prior to the
 * device state going away.  Note we may call back into
 * driver; it must be prepared for this.
 */
void
ieee80211_ifdetach(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ieee80211vap *vap;

	/* Detach the underlying interface first so no new work arrives. */
	if_detach(ifp);

	/*
	 * Destroy every vap; ieee80211_vap_destroy() removes the vap
	 * from ic_vaps, so repeatedly taking the head drains the list.
	 */
	while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL)
		ieee80211_vap_destroy(vap);
	/* Wait for any in-flight work in the parent driver to complete. */
	ieee80211_waitfor_parent(ic);

	ieee80211_sysctl_detach(ic);
	ieee80211_dfs_detach(ic);
	ieee80211_regdomain_detach(ic);
	ieee80211_scan_detach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_detach(ic);
#endif
	ieee80211_ht_detach(ic);
	/* NB: must be called before ieee80211_node_detach */
	ieee80211_proto_detach(ic);
	ieee80211_crypto_detach(ic);
	ieee80211_power_detach(ic);
	ieee80211_node_detach(ic);

	ifmedia_removeall(&ic->ic_media);
	/* Release the com taskqueue last, after all users are gone. */
	taskqueue_free(ic->ic_tq);
}
static void destroy_geom_disk(struct nvd_disk *ndisk) { struct bio *bp; struct disk *disk; uint32_t unit; int cnt = 0; disk = ndisk->disk; unit = disk->d_unit; taskqueue_free(ndisk->tq); disk_destroy(ndisk->disk); mtx_lock(&ndisk->bioqlock); for (;;) { bp = bioq_takefirst(&ndisk->bioq); if (bp == NULL) break; bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; cnt++; biodone(bp); } printf(NVD_STR"%u: lost device - %d outstanding\n", unit, cnt); printf(NVD_STR"%u: removing device entry\n", unit); mtx_unlock(&ndisk->bioqlock); mtx_destroy(&ndisk->bioqlock); }
void dmar_fini_qi(struct dmar_unit *unit) { struct dmar_qi_genseq gseq; if (unit->qi_enabled) return; taskqueue_drain(unit->qi_taskqueue, &unit->qi_task); taskqueue_free(unit->qi_taskqueue); unit->qi_taskqueue = NULL; DMAR_LOCK(unit); /* quisce */ dmar_qi_ensure(unit, 1); dmar_qi_emit_wait_seq(unit, &gseq, true); dmar_qi_advance_tail(unit); dmar_qi_wait_for_seq(unit, &gseq, false); /* only after the quisce, disable queue */ dmar_disable_qi_intr(unit); dmar_disable_qi(unit); KASSERT(unit->inv_seq_waiters == 0, ("dmar%d: waiters on disabled queue", unit->unit)); DMAR_UNLOCK(unit); kmem_free(kernel_arena, unit->inv_queue, unit->inv_queue_size); unit->inv_queue = 0; unit->inv_queue_size = 0; unit->qi_enabled = 0; }
int main(int argc, char **argv) { struct taskqueue *t; struct task task; int retval; t = taskqueue_create("test", M_WAITOK, taskqueue_thread_enqueue, &t); if (!t) { kprintf("unable to create taskqueue\n"); return 1; } retval = taskqueue_start_threads(&t, 4, /*num threads*/ PWAIT, /*priority*/ "%s", /* thread name */ "test"); if (retval != 0) { kprintf("failed to create taskqueue threads\n"); return 1; } TASK_INIT(&task, /*priority*/0, task_worker, NULL); retval = taskqueue_enqueue(t, &task); if (retval != 0) { kprintf("failed to enqueue task\n"); return 1; } taskqueue_drain(t, &task); taskqueue_free(t); return 0; }
/*
 * Detach the virtio block device: flag the detach, stop the device,
 * drain and free the interrupt taskqueue, fail/drain outstanding
 * requests, then destroy the disk, sglist, and lock.
 */
static int
vtblk_detach(device_t dev)
{
	struct vtblk_softc *sc;

	sc = device_get_softc(dev);

	/* Mark detach-in-progress under the lock, then stop the device. */
	VTBLK_LOCK(sc);
	sc->vtblk_flags |= VTBLK_FLAG_DETACH;
	if (device_is_attached(dev))
		vtblk_stop(sc);
	VTBLK_UNLOCK(sc);

	/* Drain the interrupt task before freeing its queue. */
	if (sc->vtblk_tq != NULL) {
		taskqueue_drain(sc->vtblk_tq, &sc->vtblk_intr_task);
		taskqueue_free(sc->vtblk_tq);
		sc->vtblk_tq = NULL;
	}

	/* Complete/fail any requests still outstanding. */
	vtblk_drain(sc);

	if (sc->vtblk_disk != NULL) {
		disk_destroy(sc->vtblk_disk);
		sc->vtblk_disk = NULL;
	}

	if (sc->vtblk_sglist != NULL) {
		sglist_free(sc->vtblk_sglist);
		sc->vtblk_sglist = NULL;
	}

	/* Lock destroyed last: everything above may still have used it. */
	VTBLK_LOCK_DESTROY(sc);

	return (0);
}
/*
 * Detach the CFI flash disk.  Refuses to detach while the disk is open.
 */
static int
cfi_disk_detach(device_t dev)
{
	struct cfi_disk_softc *sc = device_get_softc(dev);

	if (sc->flags & CFI_DISK_OPEN)
		return EBUSY;
	taskqueue_free(sc->tq);
	/* XXX drain bioq */
	/*
	 * NOTE(review): per the XXX above, any bios still queued are not
	 * failed/completed here (compare destroy_geom_disk in the nvd
	 * driver, which EIO-fails the queue) — confirm whether pending
	 * I/O can exist once CFI_DISK_OPEN is clear.
	 */
	disk_destroy(sc->disk);
	mtx_destroy(&sc->qlock);
	return 0;
}
/*
 * Module teardown for pefs: run the free-node task one last time, then
 * release the taskqueue, caches, locks, hash table, and node zone.
 * Always succeeds.
 */
int
pefs_uninit(struct vfsconf *vfsp)
{
	/*
	 * Enqueue the free-node task once more and wait for it, so any
	 * nodes still pending release are reclaimed before the zone and
	 * hash table below are destroyed.
	 */
	taskqueue_enqueue(pefs_taskq, &pefs_task_freenode);
	taskqueue_drain(pefs_taskq, &pefs_task_freenode);
	taskqueue_free(pefs_taskq);
	pefs_dircache_uninit();
	pefs_crypto_uninit();
	mtx_destroy(&pefs_node_listmtx);
	free(pefs_nodehash_tbl, M_PEFSHASH);
	uma_zdestroy(pefs_node_zone);
	return (0);
}
/*
 * Destroy a completion queue: drain and free its taskqueue, unmap and
 * release the hardware queue resources, return the EQ vector if one
 * was taken from the completion pool, then free the cq and clear the
 * caller's pointer.
 */
void mlx4_en_destroy_cq(struct mlx4_en_priv *priv, struct mlx4_en_cq **pcq)
{
	struct mlx4_en_dev *mdev = priv->mdev;
	struct mlx4_en_cq *cq = *pcq;

	/* Make sure the cq task is not running before freeing the queue. */
	taskqueue_drain(cq->tq, &cq->cq_task);
	taskqueue_free(cq->tq);
	mlx4_en_unmap_buffer(&cq->wqres.buf);
	mlx4_free_hwq_res(mdev->dev, &cq->wqres, cq->buf_size);
	/* Only release the EQ if it came from the completion pool. */
	if (priv->mdev->dev->caps.comp_pool && cq->vector)
		mlx4_release_eq(priv->mdev->dev, cq->vector);
	kfree(cq);
	/* Clear the caller's reference so it cannot be used after free. */
	*pcq = NULL;
}
/*
 * Deferred attach hook, run once interrupts are available: register the
 * ath10k core.  On success we simply return; on failure we unwind the
 * resources set up by athp_pci_attach() in reverse order.
 */
static void
athp_attach_preinit(void *arg)
{
	struct ath10k *ar = arg;
	struct ath10k_pci *ar_pci = ar->sc_psc;
	int ret;

	/* This hook only runs once; remove it before doing real work. */
	config_intrhook_disestablish(&ar->sc_preinit_hook);

	/* Setup ioctl handler */
	athp_ioctl_setup(ar);

	ret = ath10k_core_register(ar);
	if (ret == 0)
		return;

	/* Failure: unwind everything athp_pci_attach() set up. */

	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);

	/* XXX TODO: refactor this stuff out */
	athp_pci_free_bufs(ar_pci);

	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);

	bus_release_resource(ar->sc_dev, SYS_RES_MEMORY, BS_BAR,
	    ar_pci->sc_sr);

	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);

	/* Drain before free so no task is left running on the queue. */
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}

	ath10k_core_destroy(ar);
}
/*
 * Detach the Altera SD card controller.  Requests a detach, waits for
 * the driver state machine to reach DETACHED, cancels/drains the
 * timeout task, removes any disk, and frees remaining resources.
 */
void
altera_sdcard_detach(struct altera_sdcard_softc *sc)
{

	KASSERT(sc->as_taskqueue != NULL, ("%s: taskqueue not present",
	    __func__));

	/*
	 * Winding down the driver on detach is a bit complex.  Update the
	 * flags to indicate that a detach has been requested, and then wait
	 * for in-progress I/O to wind down before continuing.
	 */
	ALTERA_SDCARD_LOCK(sc);
	sc->as_flags |= ALTERA_SDCARD_FLAG_DETACHREQ;
	while (sc->as_state != ALTERA_SDCARD_STATE_DETACHED)
		ALTERA_SDCARD_CONDVAR_WAIT(sc);
	ALTERA_SDCARD_UNLOCK(sc);

	/*
	 * Now wait for the possibly still executing taskqueue to drain.  In
	 * principle no more events will be scheduled as we've transitioned to
	 * a detached state, but there might still be a request in execution.
	 *
	 * Loop: cancel rearms nothing, but an already-running instance must
	 * be drained; repeat until cancel reports nothing pending.
	 */
	while (taskqueue_cancel_timeout(sc->as_taskqueue, &sc->as_task, NULL))
		taskqueue_drain_timeout(sc->as_taskqueue, &sc->as_task);

	/*
	 * Simulate a disk removal if one is present to deal with any pending
	 * or queued I/O.
	 */
	if (sc->as_disk != NULL)
		altera_sdcard_disk_remove(sc);
	KASSERT(bioq_first(&sc->as_bioq) == NULL,
	    ("%s: non-empty bioq", __func__));

	/*
	 * Free any remaining allocated resources.
	 */
	taskqueue_free(sc->as_taskqueue);
	sc->as_taskqueue = NULL;
	ALTERA_SDCARD_CONDVAR_DESTROY(sc);
	ALTERA_SDCARD_LOCK_DESTROY(sc);
}
/**
 * mrsas_cam_detach:	De-allocates and teardown CAM
 * input:		Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.  (DragonFly variant:
 * serializes SIM teardown with lockmgr on sim_lock.)
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	/* Free the event taskqueue before tearing down the SIMs. */
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	lockmgr(&sc->sim_lock, LK_EXCLUSIVE);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		cam_sim_free(sc->sim_1);
	}
	lockmgr(&sc->sim_lock, LK_RELEASE);
}
/*
 * Detach net80211 state on device detach.  Tear down
 * all vap's and reclaim all common state prior to the
 * device state going away.  Note we may call back into
 * driver; it must be prepared for this.
 */
void
ieee80211_ifdetach(struct ieee80211com *ic)
{
	struct ifnet *ifp = ic->ic_ifp;
	struct ieee80211vap *vap;

	/*
	 * This detaches the main interface, but not the vaps.
	 * Each VAP may be in a separate VIMAGE.
	 */
	CURVNET_SET(ifp->if_vnet);
	if_detach(ifp);
	CURVNET_RESTORE();

	/*
	 * The VAP is responsible for setting and clearing
	 * the VIMAGE context.
	 */
	while ((vap = TAILQ_FIRST(&ic->ic_vaps)) != NULL)
		ieee80211_vap_destroy(vap);
	/* Wait for any parent-driver deferred work to complete. */
	ieee80211_waitfor_parent(ic);

	ieee80211_sysctl_detach(ic);
	ieee80211_dfs_detach(ic);
	ieee80211_regdomain_detach(ic);
	ieee80211_scan_detach(ic);
#ifdef IEEE80211_SUPPORT_SUPERG
	ieee80211_superg_detach(ic);
#endif
	ieee80211_ht_detach(ic);
	/* NB: must be called before ieee80211_node_detach */
	ieee80211_proto_detach(ic);
	ieee80211_crypto_detach(ic);
	ieee80211_power_detach(ic);
	ieee80211_node_detach(ic);

	/* XXX VNET needed? */
	ifmedia_removeall(&ic->ic_media);

	taskqueue_free(ic->ic_tq);
	/* Locks destroyed last; everything above may have used them. */
	IEEE80211_TX_LOCK_DESTROY(ic);
	IEEE80211_LOCK_DESTROY(ic);
}
/**
 * mrsas_cam_detach:	De-allocates and teardown CAM
 * input:		Adapter instance soft state
 *
 * De-registers and frees the paths and SIMs.  (FreeBSD variant:
 * serializes SIM teardown with sim_lock mutex.)
 */
void
mrsas_cam_detach(struct mrsas_softc *sc)
{
	/* Free the event taskqueue before tearing down the SIMs. */
	if (sc->ev_tq != NULL)
		taskqueue_free(sc->ev_tq);
	mtx_lock(&sc->sim_lock);
	if (sc->path_0)
		xpt_free_path(sc->path_0);
	if (sc->sim_0) {
		xpt_bus_deregister(cam_sim_path(sc->sim_0));
		cam_sim_free(sc->sim_0, FALSE);
	}
	if (sc->path_1)
		xpt_free_path(sc->path_1);
	if (sc->sim_1) {
		xpt_bus_deregister(cam_sim_path(sc->sim_1));
		/*
		 * NOTE(review): sim_0 is freed with free_devq=FALSE and
		 * sim_1 with TRUE — presumably both SIMs share one devq
		 * that is released exactly once here; confirm at the
		 * allocation site.
		 */
		cam_sim_free(sc->sim_1, TRUE);
	}
	mtx_unlock(&sc->sim_lock);
}
static void destroy_geom_disk(struct nvd_disk *ndisk) { struct bio *bp; taskqueue_free(ndisk->tq); disk_destroy(ndisk->disk); mtx_lock(&ndisk->bioqlock); for (;;) { bp = bioq_takefirst(&ndisk->bioq); if (bp == NULL) break; bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); } mtx_unlock(&ndisk->bioqlock); mtx_destroy(&ndisk->bioqlock); }
void destroy_geom_disk(struct nand_chip *chip) { struct bio *bp; taskqueue_free(chip->tq); disk_destroy(chip->ndisk); disk_destroy(chip->rdisk); mtx_lock(&chip->qlock); for (;;) { bp = bioq_takefirst(&chip->bioq); if (bp == NULL) break; bp->bio_error = EIO; bp->bio_flags |= BIO_ERROR; bp->bio_resid = bp->bio_bcount; biodone(bp); } mtx_unlock(&chip->qlock); mtx_destroy(&chip->qlock); }
/*
 * PCI attach for the athp (ath10k) driver.  Initializes core and
 * FreeBSD-side state (locks, BMI/HTT DMA memory, pipe taskqueue),
 * maps the register BAR, sets up MSI/legacy interrupts and register
 * ops, allocates CE pipes, resets the chip, verifies the chip ID, and
 * finally defers the remainder of attach to athp_attach_preinit() via
 * a config intrhook.  On failure, unwinds through the goto ladder at
 * the bottom (bad4/bad1/bad/bad0) in reverse order of setup.
 */
static int
athp_pci_attach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;
	int rid, i;
	int err = 0;
	int ret;

	ar->sc_dev = dev;
	ar->sc_invalid = 1;

	/* XXX TODO: initialize sc_debug from TUNABLE */
#if 0
	ar->sc_debug = ATH10K_DBG_BOOT | ATH10K_DBG_PCI | ATH10K_DBG_HTC |
	    ATH10K_DBG_PCI_DUMP | ATH10K_DBG_WMI | ATH10K_DBG_BMI | ATH10K_DBG_MAC |
	    ATH10K_DBG_WMI_PRINT | ATH10K_DBG_MGMT | ATH10K_DBG_DATA | ATH10K_DBG_HTT;
#endif
	ar->sc_psc = ar_pci;

	/* Load-time tunable/sysctl tree */
	athp_attach_sysctl(ar);

	/* Enable WMI/HTT RX for now */
	ar->sc_rx_wmi = 1;
	ar->sc_rx_htt = 1;

	/* Fetch pcie capability offset */
	ret = pci_find_cap(dev, PCIY_EXPRESS, &ar_pci->sc_cap_off);
	if (ret != 0) {
		device_printf(dev,
		    "%s: failed to find pci-express capability offset\n",
		    __func__);
		return (ret);
	}

	/*
	 * Initialise ath10k core bits.
	 */
	if (ath10k_core_init(ar) < 0)
		goto bad0;

	/*
	 * Initialise ath10k freebsd bits.
	 *
	 * One mutex per subsystem; each name is derived from the device
	 * unit so locks are distinguishable in lock diagnostics.
	 */
	sprintf(ar->sc_mtx_buf, "%s:def", device_get_nameunit(dev));
	mtx_init(&ar->sc_mtx, ar->sc_mtx_buf, MTX_NETWORK_LOCK, MTX_DEF);
	sprintf(ar->sc_buf_mtx_buf, "%s:buf", device_get_nameunit(dev));
	mtx_init(&ar->sc_buf_mtx, ar->sc_buf_mtx_buf, "athp buf", MTX_DEF);
	sprintf(ar->sc_dma_mtx_buf, "%s:dma", device_get_nameunit(dev));
	mtx_init(&ar->sc_dma_mtx, ar->sc_dma_mtx_buf, "athp dma", MTX_DEF);
	sprintf(ar->sc_conf_mtx_buf, "%s:conf", device_get_nameunit(dev));
	mtx_init(&ar->sc_conf_mtx, ar->sc_conf_mtx_buf, "athp conf",
	    MTX_DEF | MTX_RECURSE);
	sprintf(ar_pci->ps_mtx_buf, "%s:ps", device_get_nameunit(dev));
	mtx_init(&ar_pci->ps_mtx, ar_pci->ps_mtx_buf, "athp ps", MTX_DEF);
	sprintf(ar_pci->ce_mtx_buf, "%s:ce", device_get_nameunit(dev));
	mtx_init(&ar_pci->ce_mtx, ar_pci->ce_mtx_buf, "athp ce", MTX_DEF);
	sprintf(ar->sc_data_mtx_buf, "%s:data", device_get_nameunit(dev));
	mtx_init(&ar->sc_data_mtx, ar->sc_data_mtx_buf, "athp data",
	    MTX_DEF);

	/*
	 * Initialise ath10k BMI/PCIDIAG bits.
	 */
	ret = athp_descdma_alloc(ar, &ar_pci->sc_bmi_txbuf, "bmi_msg_req",
	    4, 1024);
	ret |= athp_descdma_alloc(ar, &ar_pci->sc_bmi_rxbuf, "bmi_msg_resp",
	    4, 1024);
	if (ret != 0) {
		device_printf(dev, "%s: failed to allocate BMI TX/RX buffer\n",
		    __func__);
		/*
		 * NOTE(review): goes to bad0, which skips the descdma
		 * frees done under "bad:" — if the first alloc succeeded
		 * and the second failed, the first buffer is presumably
		 * leaked; confirm athp_descdma_free is safe on a
		 * partially-initialized descriptor and whether this
		 * should target "bad" instead.
		 */
		goto bad0;
	}

	/*
	 * Initialise HTT descriptors/memory.
	 */
	ret = ath10k_htt_rx_alloc_desc(ar, &ar->htt);
	if (ret != 0) {
		device_printf(dev, "%s: failed to alloc HTT RX descriptors\n",
		    __func__);
		goto bad;
	}

	/* XXX here instead of in core_init because we need the lock init'ed */
	callout_init_mtx(&ar->scan.timeout, &ar->sc_data_mtx, 0);

	ar_pci->pipe_taskq = taskqueue_create("athp pipe taskq", M_NOWAIT,
	    NULL, ar_pci);
	/*
	 * NOTE(review): taskqueue_start_threads is called before the
	 * NULL check on pipe_taskq — confirm it tolerates a NULL queue
	 * pointer when taskqueue_create failed under M_NOWAIT.
	 */
	(void) taskqueue_start_threads(&ar_pci->pipe_taskq, 1, PI_NET, "%s pipe taskq",
	    device_get_nameunit(dev));
	if (ar_pci->pipe_taskq == NULL) {
		device_printf(dev, "%s: couldn't create pipe taskq\n",
		    __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Look at the device/vendor ID and choose which register offset
	 * mapping to use.  This is used by a lot of the register access
	 * pieces to get the correct device-specific windows.
	 */
	ar_pci->sc_vendorid = pci_get_vendor(dev);
	ar_pci->sc_deviceid = pci_get_device(dev);
	if (athp_pci_hw_lookup(ar_pci) != 0) {
		device_printf(dev, "%s: hw lookup failed\n", __func__);
		err = ENXIO;
		goto bad;
	}

	/*
	 * Enable bus mastering.
	 */
	pci_enable_busmaster(dev);

	/*
	 * Setup memory-mapping of PCI registers.
	 */
	rid = BS_BAR;
	ar_pci->sc_sr = bus_alloc_resource_any(dev, SYS_RES_MEMORY, &rid,
	    RF_ACTIVE);
	if (ar_pci->sc_sr == NULL) {
		device_printf(dev, "cannot map register space\n");
		err = ENXIO;
		goto bad;
	}

	/* Driver copy; hopefully we can delete this */
	ar->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/* Local copy for bus operations */
	ar_pci->sc_st = rman_get_bustag(ar_pci->sc_sr);
	ar_pci->sc_sh = rman_get_bushandle(ar_pci->sc_sr);

	/*
	 * Mark device invalid so any interrupts (shared or otherwise)
	 * that arrive before the HAL is setup are discarded.
	 */
	ar->sc_invalid = 1;

	printf("%s: msicount=%d, msixcount=%d\n",
	    __func__, pci_msi_count(dev), pci_msix_count(dev));

	/*
	 * Arrange interrupt line.
	 *
	 * XXX TODO: this is effictively ath10k_pci_init_irq().
	 * Refactor it out later.
	 *
	 * First - attempt MSI.  If we get it, then use it.
	 */
	i = MSI_NUM_REQUEST;
	if (pci_alloc_msi(dev, &i) == 0) {
		device_printf(dev, "%s: %d MSI interrupts\n", __func__, i);
		ar_pci->num_msi_intrs = MSI_NUM_REQUEST;
	} else {
		/* Fall back: one MSI vector, then legacy INTx. */
		i = 1;
		if (pci_alloc_msi(dev, &i) == 0) {
			device_printf(dev, "%s: 1 MSI interrupt\n", __func__);
			ar_pci->num_msi_intrs = 1;
		} else {
			device_printf(dev, "%s: legacy interrupts\n",
			    __func__);
			ar_pci->num_msi_intrs = 0;
		}
	}
	err = ath10k_pci_request_irq(ar_pci);
	if (err != 0)
		goto bad1;

	/*
	 * Attach register ops - needed for the caller to do register IO.
	 */
	ar->sc_regio.reg_read = athp_pci_regio_read_reg;
	ar->sc_regio.reg_write = athp_pci_regio_write_reg;
	ar->sc_regio.reg_s_read = athp_pci_regio_s_read_reg;
	ar->sc_regio.reg_s_write = athp_pci_regio_s_write_reg;
	ar->sc_regio.reg_flush = athp_pci_regio_flush_reg;
	ar->sc_regio.reg_arg = ar_pci;

	/*
	 * TODO: abstract this out to be a bus/hif specific
	 * attach path.
	 *
	 * I'm not sure what USB/SDIO will look like here, but
	 * I'm pretty sure it won't involve PCI/CE setup.
	 * It'll still have WME/HIF/BMI, but it'll be done over
	 * USB endpoints.
	 */
	if (athp_pci_setup_bufs(ar_pci) != 0) {
		err = ENXIO;
		goto bad4;
	}

	/* HIF ops attach */
	ar->hif.ops = &ath10k_pci_hif_ops;
	ar->hif.bus = ATH10K_BUS_PCI;

	/* Alloc pipes */
	ret = ath10k_pci_alloc_pipes(ar);
	if (ret) {
		device_printf(ar->sc_dev, "%s: pci_alloc_pipes failed: %d\n",
		    __func__, ret);
		/* XXX cleanup */
		err = ENXIO;
		goto bad4;
	}

	/* deinit ce */
	ath10k_pci_ce_deinit(ar);

	/* disable irq */
	ret = ath10k_pci_irq_disable(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: irq_disable failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* init IRQ */
	ret = ath10k_pci_init_irq(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: init_irq failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* Ok, gate open the interrupt handler */
	ar->sc_invalid = 0;

	/* pci_chip_reset */
	ret = ath10k_pci_chip_reset(ar_pci);
	if (ret) {
		device_printf(ar->sc_dev, "%s: chip_reset failed: %d\n",
		    __func__, ret);
		err = ENXIO;
		goto bad4;
	}

	/* read SoC/chip version */
	ar->sc_chipid = athp_pci_soc_read32(ar,
	    SOC_CHIP_ID_ADDRESS(ar->sc_regofs));

	/* Verify chip version is something we can use */
	device_printf(ar->sc_dev, "%s: chipid: 0x%08x\n", __func__,
	    ar->sc_chipid);
	if (! ath10k_pci_chip_is_supported(ar_pci->sc_deviceid,
	    ar->sc_chipid)) {
		device_printf(ar->sc_dev,
		    "%s: unsupported chip; chipid: 0x%08x\n", __func__,
		    ar->sc_chipid);
		err = ENXIO;
		goto bad4;
	}

	/* Call main attach method with given info */
	ar->sc_preinit_hook.ich_func = athp_attach_preinit;
	ar->sc_preinit_hook.ich_arg = ar;
	if (config_intrhook_establish(&ar->sc_preinit_hook) != 0) {
		device_printf(ar->sc_dev,
		    "%s: couldn't establish preinit hook\n", __func__);
		/*
		 * NOTE(review): err is still 0 here, so this failure
		 * path returns 0 from bad0 — confirm whether it should
		 * set err = ENXIO first.
		 */
		goto bad4;
	}

	return (0);

	/* Fallthrough for setup failure */
bad4:
	athp_pci_free_bufs(ar_pci);
	/* Ensure we disable interrupts from the device */
	ath10k_pci_deinit_irq(ar_pci);
	ath10k_pci_free_irq(ar_pci);
bad1:
	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);
bad:
	ath10k_htt_rx_free_desc(ar, &ar->htt);
	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);
	/* XXX disable busmaster? */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}
	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);
	ath10k_core_destroy(ar);
bad0:
	return (err);
}
/*
 * PCI detach for the athp (ath10k) driver.  Marks the device invalid,
 * unregisters the net80211/core state, then releases IRQs, CE pipes,
 * HTT descriptors, buffers, bus resources, locks, and finally the pipe
 * taskqueue — roughly the reverse of athp_pci_attach().
 */
static int
athp_pci_detach(device_t dev)
{
	struct ath10k_pci *ar_pci = device_get_softc(dev);
	struct ath10k *ar = &ar_pci->sc_sc;

	ath10k_warn(ar, "%s: called\n", __func__);

	/* Signal things we're going down.. */
	ATHP_LOCK(ar);
	ar->sc_invalid = 1;
	ATHP_UNLOCK(ar);

	/* Shutdown ioctl handler */
	athp_ioctl_teardown(ar);

	/* XXX TODO: synchronise with running things first */

	/*
	 * Do a config read to clear pre-existing pci error status.
	 */
	(void) pci_read_config(dev, PCIR_COMMAND, 4);

	/* stop/free the core - this detaches net80211 state */
	ath10k_core_unregister(ar);

	/* kill tasklet(s) */

	/* deinit irq - stop getting more interrupts */
	ath10k_pci_deinit_irq(ar_pci);

	/* ce deinit */
	ath10k_pci_ce_deinit(ar);

	/* free pipes */
	ath10k_pci_free_pipes(ar);

	/* free HTT RX buffers */
	ath10k_htt_rx_free_desc(ar, &ar->htt);

	/* pci release */
	/* sleep sync */

	/* buffers */
	athp_pci_free_bufs(ar_pci);

	/* core itself - destroys taskqueues, etc */
	ath10k_core_destroy(ar);

	/* Free bus resources */
	bus_generic_detach(dev);

	/* Tear down interrupt */
	ath10k_pci_free_irq(ar_pci);

	bus_release_resource(dev, SYS_RES_MEMORY, BS_BAR, ar_pci->sc_sr);

	/* XXX disable busmastering? */

	/* Free BMI buffers */
	athp_descdma_free(ar, &ar_pci->sc_bmi_txbuf);
	athp_descdma_free(ar, &ar_pci->sc_bmi_rxbuf);

	athp_trace_close(ar);

	/* Free locks */
	mtx_destroy(&ar_pci->ps_mtx);
	mtx_destroy(&ar_pci->ce_mtx);
	mtx_destroy(&ar->sc_conf_mtx);
	mtx_destroy(&ar->sc_data_mtx);
	mtx_destroy(&ar->sc_buf_mtx);
	mtx_destroy(&ar->sc_dma_mtx);
	mtx_destroy(&ar->sc_mtx);

	/* Tear down the pipe taskqueue */
	/* Drain before free so no pipe task is left mid-execution. */
	if (ar_pci->pipe_taskq) {
		taskqueue_drain_all(ar_pci->pipe_taskq);
		taskqueue_free(ar_pci->pipe_taskq);
	}

	return (0);
}
/*
 * Detach the SMC ethernet device: stop the chip, detach the ifnet,
 * drain callouts and the interrupt/rx/tx tasks, then release the
 * interrupt, miibus, register and IRQ resources, and the mutex.
 *
 * Fix: the DEVICE_POLLING branch dereferenced sc->smc_ifp
 * unconditionally, although every other use of smc_ifp in this
 * function is guarded by a NULL check — add the same guard so a
 * partially-attached device cannot fault here.
 */
int
smc_detach(device_t dev)
{
	int			type;
	struct smc_softc	*sc;

	sc = device_get_softc(dev);
	SMC_LOCK(sc);
	smc_stop(sc);
	SMC_UNLOCK(sc);

	if (sc->smc_ifp != NULL) {
		ether_ifdetach(sc->smc_ifp);
	}

	callout_drain(&sc->smc_watchdog);
	callout_drain(&sc->smc_mii_tick_ch);

#ifdef DEVICE_POLLING
	/* Guard matches the other smc_ifp NULL checks in this function. */
	if (sc->smc_ifp != NULL &&
	    (sc->smc_ifp->if_capenable & IFCAP_POLLING))
		ether_poll_deregister(sc->smc_ifp);
#endif

	if (sc->smc_ih != NULL)
		bus_teardown_intr(sc->smc_dev, sc->smc_irq, sc->smc_ih);

	/* Drain every task before freeing the queue they run on. */
	if (sc->smc_tq != NULL) {
		taskqueue_drain(sc->smc_tq, &sc->smc_intr);
		taskqueue_drain(sc->smc_tq, &sc->smc_rx);
		taskqueue_drain(sc->smc_tq, &sc->smc_tx);
		taskqueue_free(sc->smc_tq);
		sc->smc_tq = NULL;
	}

	if (sc->smc_ifp != NULL) {
		if_free(sc->smc_ifp);
	}

	if (sc->smc_miibus != NULL) {
		device_delete_child(sc->smc_dev, sc->smc_miibus);
		bus_generic_detach(sc->smc_dev);
	}

	if (sc->smc_reg != NULL) {
		/* Register space may be I/O ports or memory-mapped. */
		type = SYS_RES_IOPORT;
		if (sc->smc_usemem)
			type = SYS_RES_MEMORY;

		bus_release_resource(sc->smc_dev, type, sc->smc_reg_rid,
		    sc->smc_reg);
	}

	if (sc->smc_irq != NULL)
		bus_release_resource(sc->smc_dev, SYS_RES_IRQ, sc->smc_irq_rid,
		   sc->smc_irq);

	if (mtx_initialized(&sc->smc_mtx))
		mtx_destroy(&sc->smc_mtx);

	return (0);
}