/*
 * tunclose - close the device: mark the interface down and delete
 * routing info.
 *
 * NOTE(review): this block appears truncated in this chunk -- the
 * "out_nolock" label targeted by the gotos below, and the function's
 * return path, are not visible here.
 */
int
tunclose(dev_t dev, int flag, int mode, struct lwp *l)
{
	int s;
	struct tun_softc *tp;
	struct ifnet *ifp;

	s = splnet();
	if ((tp = tun_find_zunit(minor(dev))) != NULL) {
		/* interface was "destroyed" before the close */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		free(tp, M_DEVBUF);
		goto out_nolock;
	}

	/*
	 * NOTE(review): tun_find_unit() apparently returns with
	 * tun_lock held on success (mutex_exit below) -- confirm.
	 */
	if ((tp = tun_find_unit(dev)) == NULL)
		goto out_nolock;

	ifp = &tp->tun_if;

	/* Mark the device closed and wake/notify readers. */
	tp->tun_flags &= ~TUN_OPEN;

	tp->tun_pgid = 0;
	selnotify(&tp->tun_rsel, 0, 0);

	TUNDEBUG ("%s: closed\n", ifp->if_xname);
	mutex_exit(&tp->tun_lock);

	/*
	 * junk all pending output
	 */
	IFQ_PURGE(&ifp->if_snd);

	if (ifp->if_flags & IFF_UP) {
		if_down(ifp);
		if (ifp->if_flags & IFF_RUNNING) {
			/* find internet addresses and delete routes */
			struct ifaddr *ifa;

			IFADDR_FOREACH(ifa, ifp) {
#if defined(INET) || defined(INET6)
				if (ifa->ifa_addr->sa_family == AF_INET ||
				    ifa->ifa_addr->sa_family == AF_INET6) {
					rtinit(ifa, (int)RTM_DELETE,
					    tp->tun_flags & TUN_DSTADDR
					    ? RTF_HOST : 0);
				}
#endif
			}
		}
	}
/*
 * soput: release a socket's synchronization resources and return the
 * socket structure to the socket cache.
 *
 * The KASSERTs verify that nobody is still sleeping on any of the
 * socket's condition variables before they are destroyed.
 */
void
soput(struct socket *so)
{

	KASSERT(!cv_has_waiters(&so->so_cv));
	KASSERT(!cv_has_waiters(&so->so_rcv.sb_cv));
	KASSERT(!cv_has_waiters(&so->so_snd.sb_cv));

	/* Tear down select/poll state for both buffers. */
	seldestroy(&so->so_rcv.sb_sel);
	seldestroy(&so->so_snd.sb_sel);

	/* Release the lock object and the condition variables. */
	mutex_obj_free(so->so_lock);
	cv_destroy(&so->so_cv);
	cv_destroy(&so->so_rcv.sb_cv);
	cv_destroy(&so->so_snd.sb_cv);

	pool_cache_put(socket_cache, so);
}
static void xenevt_free(struct xenevt_d *d) { int i; KASSERT(mutex_owned(&devevent_lock)); KASSERT(mutex_owned(&d->lock)); for (i = 0; i < NR_EVENT_CHANNELS; i++ ) { if (devevent[i] == d) { evtchn_op_t op = { .cmd = 0 }; int error; hypervisor_mask_event(i); xen_atomic_clear_bit(&d->ci->ci_evtmask[0], i); devevent[i] = NULL; op.cmd = EVTCHNOP_close; op.u.close.port = i; if ((error = HYPERVISOR_event_channel_op(&op))) { printf("xenevt_fclose: error %d from " "hypervisor\n", -error); } } } mutex_exit(&d->lock); seldestroy(&d->sel); cv_destroy(&d->cv); mutex_destroy(&d->lock); free(d, M_DEVBUF); }
/*
 * dmio_usrreq_done:
 *
 *	Dmover completion callback.  Moves the finished request from the
 *	session's pending queue to its complete queue and wakes any
 *	reader or poller -- or, if the descriptor has already been
 *	closed (DMIO_STATE_DEAD), releases the request and, when it was
 *	the last one outstanding, the dmio_state itself (see the
 *	matching logic in dmio_close()).
 */
static void
dmio_usrreq_done(struct dmover_request *dreq)
{
	struct dmio_usrreq_state *dus = dreq->dreq_cookie;
	struct dmio_state *ds = dreq->dreq_session->dses_cookie;

	/* We're already at splsoftclock(). */
	simple_lock(&ds->ds_slock);

	TAILQ_REMOVE(&ds->ds_pending, dus, dus_q);

	if (ds->ds_flags & DMIO_STATE_DEAD) {
		/* Descriptor was closed; nobody will read this result. */
		ds->ds_nreqs--;
		dmio_usrreq_fini(ds, dus);
		dmover_request_free(dreq);
		if (ds->ds_nreqs == 0) {
			/* Last outstanding request: free the state now. */
			simple_unlock(&ds->ds_slock);
			seldestroy(&ds->ds_selq);
			pool_put(&dmio_state_pool, ds);
			return;
		}
	} else {
		/* Hand the result to readers and wake sleepers/pollers. */
		TAILQ_INSERT_TAIL(&ds->ds_complete, dus, dus_q);
		if (ds->ds_flags & DMIO_STATE_READ_WAIT) {
			ds->ds_flags &= ~DMIO_STATE_READ_WAIT;
			wakeup(&ds->ds_complete);
		}
		if (ds->ds_flags & DMIO_STATE_SEL) {
			ds->ds_flags &= ~DMIO_STATE_SEL;
			selnotify(&ds->ds_selq, POLLOUT | POLLWRNORM, 0);
		}
	}

	simple_unlock(&ds->ds_slock);
}
/*
 * udsir_detach: detach the device.  Stops the worker kthread, aborts
 * and closes the USB pipes (waking any processes sleeping on I/O),
 * waits for remaining references to drain, detaches the child device,
 * and finally releases the select state.
 */
static int
udsir_detach(device_t self, int flags)
{
	struct udsir_softc *sc = device_private(self);
	int s;
	int rv = 0;

	DPRINTFN(0, ("udsir_detach: sc=%p flags=%d\n", sc, flags));

	/* Tell the kthread to exit, and wait until it has. */
	sc->sc_closing = sc->sc_dying = 1;
	wakeup(&sc->sc_thread);
	while (sc->sc_thread != NULL)
		tsleep(&sc->sc_closing, PWAIT, "usircl", 0);

	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	if (sc->sc_rd_pipe != NULL) {
		usbd_abort_pipe(sc->sc_rd_pipe);
		usbd_close_pipe(sc->sc_rd_pipe);
		sc->sc_rd_pipe = NULL;
	}
	if (sc->sc_wr_pipe != NULL) {
		usbd_abort_pipe(sc->sc_wr_pipe);
		usbd_close_pipe(sc->sc_wr_pipe);
		sc->sc_wr_pipe = NULL;
	}

	/* Wake anyone sleeping on read data or write space. */
	wakeup(&sc->sc_ur_framelen);
	wakeup(&sc->sc_wr_buf);

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wait for processes to go away. */
		usb_detach_waitold(sc->sc_dev);
	}
	splx(s);

	/* Detach the child device, if any. */
	if (sc->sc_child != NULL)
		rv = config_detach(sc->sc_child, flags);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	seldestroy(&sc->sc_rd_sel);
	seldestroy(&sc->sc_wr_sel);

	return rv;
}
/*
 * tun_clone_destroy: destroy a cloned tun interface.
 *
 * If the character device is still open (TUN_OPEN), the softc is moved
 * to the zombie list and its storage is kept until the last close
 * (see the tun_find_zunit() path in tunclose()); otherwise it is torn
 * down and freed here.
 */
static int
tun_clone_destroy(struct ifnet *ifp)
{
	struct tun_softc *tp = (void *)ifp;
	int s, zombie = 0;

	IF_PURGE(&ifp->if_snd);
	ifp->if_flags &= ~IFF_RUNNING;

	s = splnet();
	simple_lock(&tun_softc_lock);
	mutex_enter(&tp->tun_lock);
	LIST_REMOVE(tp, tun_list);
	if (tp->tun_flags & TUN_OPEN) {
		/* Hang on to storage until last close */
		zombie = 1;
		tp->tun_flags &= ~TUN_INITED;
		LIST_INSERT_HEAD(&tunz_softc_list, tp, tun_list);
	}
	simple_unlock(&tun_softc_lock);

	/* Wake and notify anyone blocked on or selecting the device. */
	if (tp->tun_flags & TUN_RWAIT) {
		tp->tun_flags &= ~TUN_RWAIT;
		wakeup((void *)tp);
	}
	selnotify(&tp->tun_rsel, 0, 0);

	mutex_exit(&tp->tun_lock);
	splx(s);

	/*
	 * NOTE(review): tun_flags/tun_pgid are read here after dropping
	 * tun_lock -- confirm the race with tunclose() is benign.
	 */
	if (tp->tun_flags & TUN_ASYNC && tp->tun_pgid)
		fownsignal(tp->tun_pgid, SIGIO, POLL_HUP, 0, NULL);

	bpf_detach(ifp);
	if_detach(ifp);

	if (!zombie) {
		/* No opener left: release everything now. */
		seldestroy(&tp->tun_rsel);
		seldestroy(&tp->tun_wsel);
		softint_disestablish(tp->tun_osih);
		softint_disestablish(tp->tun_isih);
		mutex_destroy(&tp->tun_lock);
		free(tp, M_DEVBUF);
	}

	return (0);
}
/* * Tear down a firm_event queue. */ void ev_fini(struct evvar *ev) { cv_destroy(&ev->ev_cv); seldestroy(&ev->ev_sel); kmem_free(ev->ev_q, (size_t)EV_QSIZE * sizeof(struct firm_event)); }
/*
 * cir_detach: detach the cir device instance.  Revokes any vnodes
 * still referencing our character device (which closes them) and
 * releases the select structure.
 */
int
cir_detach(device_t self, int flags)
{
	struct cir_softc *sc = device_private(self);
	int cmajor, unit;

	/* Find the character device major we attached under. */
	cmajor = cdevsw_lookup_major(&cir_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	unit = device_unit(self);
	vdevgone(cmajor, unit, unit, VCHR);

	seldestroy(&sc->sc_rdsel);

	return 0;
}
/*
 * dmio_close:
 *
 *	Close file op.  Marks the descriptor dead, garbage-collects all
 *	completed requests, and frees the dmio_state unless requests are
 *	still pending -- in that case dmio_usrreq_done() performs the
 *	final teardown when the last request completes, and the session
 *	is not destroyed here either.
 */
static int
dmio_close(struct file *fp)
{
	struct dmio_state *ds = (struct dmio_state *) fp->f_data;
	struct dmio_usrreq_state *dus;
	struct dmover_session *dses;
	int s;

	s = splsoftclock();
	simple_lock(&ds->ds_slock);

	ds->ds_flags |= DMIO_STATE_DEAD;

	/* Garbage-collect all the responses on the queue. */
	while ((dus = TAILQ_FIRST(&ds->ds_complete)) != NULL) {
		TAILQ_REMOVE(&ds->ds_complete, dus, dus_q);
		ds->ds_nreqs--;
		dmover_request_free(dus->dus_req);
		dmio_usrreq_fini(ds, dus);
	}

	/*
	 * If there are any requests pending, we have to wait for
	 * them.  Don't free the dmio_state in this case.
	 */
	if (ds->ds_nreqs == 0) {
		dses = ds->ds_session;
		simple_unlock(&ds->ds_slock);
		seldestroy(&ds->ds_selq);
		pool_put(&dmio_state_pool, ds);
	} else {
		/* dmio_usrreq_done() will free ds and its session state. */
		dses = NULL;
		simple_unlock(&ds->ds_slock);
	}

	splx(s);

	fp->f_data = NULL;

	if (dses != NULL)
		dmover_session_destroy(dses);

	return (0);
}
/*
 * When detaching, we do the inverse of what is done in the attach
 * routine, in reversed order.
 *
 * tap_detach: stop and down the interface, disestablish the softint,
 * remove the sysctl leaf, detach from the ether/if layers, then
 * destroy the select record and locks.  TAP_GOING is set first so
 * concurrent operations can see the device is on its way out.
 */
static int
tap_detach(device_t self, int flags)
{
	struct tap_softc *sc = device_private(self);
	struct ifnet *ifp = &sc->sc_ec.ec_if;
#if defined(COMPAT_40) || defined(MODULAR)
	int error;
#endif
	int s;

	sc->sc_flags |= TAP_GOING;
	s = splnet();
	tap_stop(ifp, 1);
	if_down(ifp);
	splx(s);

	if (sc->sc_sih != NULL) {
		softint_disestablish(sc->sc_sih);
		sc->sc_sih = NULL;
	}

#if defined(COMPAT_40) || defined(MODULAR)
	/*
	 * Destroying a single leaf is a very straightforward operation using
	 * sysctl_destroyv.  One should be sure to always end the path with
	 * CTL_EOL.
	 */
	if ((error = sysctl_destroyv(NULL, CTL_NET, AF_LINK, tap_node,
	    device_unit(sc->sc_dev), CTL_EOL)) != 0)
		aprint_error_dev(self,
		    "sysctl_destroyv returned %d, ignoring\n", error);
#endif

	ether_ifdetach(ifp);
	if_detach(ifp);
	ifmedia_delete_instance(&sc->sc_im, IFM_INST_ANY);
	seldestroy(&sc->sc_rsel);
	mutex_destroy(&sc->sc_rdlock);
	mutex_destroy(&sc->sc_kqlock);

	pmf_device_deregister(self);

	return (0);
}
/*
 * cprng_strong_destroy: release a strong cprng.
 *
 * The rndsink is destroyed first so no further entropy callbacks can
 * fire while the rest of the state is torn down.  The caller must
 * guarantee no users remain (the KASSERT checks for cv waiters).
 */
void
cprng_strong_destroy(struct cprng_strong *cprng)
{

	/*
	 * Destroy the rndsink first to prevent calls to the callback.
	 */
	rndsink_destroy(cprng->cs_rndsink);

	KASSERT(!cv_has_waiters(&cprng->cs_cv));
#if 0
	KASSERT(!select_has_waiters(&cprng->cs_selq)) /* XXX ? */
#endif

	nist_ctr_drbg_destroy(&cprng->cs_drbg);
	seldestroy(&cprng->cs_selq);
	cv_destroy(&cprng->cs_cv);
	mutex_destroy(&cprng->cs_lock);

	/* Wipe key material before freeing. */
	explicit_memset(cprng, 0, sizeof(*cprng)); /* paranoia */
	kmem_free(cprng, sizeof(*cprng));
}
/*
 * uscanner_detach: detach the device.  Aborts in-flight transfers
 * (waking sleepers), waits for the last reference to drain, then
 * revokes open vnodes -- sc_dev_flags is cleared first so the
 * resulting close really closes the device.
 */
int
uscanner_detach(device_t self, int flags)
{
	struct uscanner_softc *sc = device_private(self);
	int s;
	int maj, mn;

	DPRINTF(("uscanner_detach: sc=%p flags=%d\n", sc, flags));

	sc->sc_dying = 1;
	sc->sc_dev_flags = 0;	/* make close really close device */

	/* Abort all pipes.  Causes processes waiting for transfer to wake. */
	if (sc->sc_bulkin_pipe != NULL)
		usbd_abort_pipe(sc->sc_bulkin_pipe);
	if (sc->sc_bulkout_pipe != NULL)
		usbd_abort_pipe(sc->sc_bulkout_pipe);

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wait for processes to go away. */
		usb_detach_waitold(sc->sc_dev);
	}
	splx(s);

	/* locate the major number */
	maj = cdevsw_lookup_major(&uscanner_cdevsw);

	/* Nuke the vnodes for any open instances (calls close). */
	mn = device_unit(self) * USB_MAX_ENDPOINTS;
	vdevgone(maj, mn, mn + USB_MAX_ENDPOINTS - 1, VCHR);

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev, sc->sc_dev);

	seldestroy(&sc->sc_selq);

	return (0);
}