/*
 * vmt_detach --
 *	Autoconf detach handler for the vmt (VMware Tools) device.
 *	Tears down the RPC channel, power-switch registrations, the
 *	three periodic callouts, the RPC buffer, and the sysctl tree.
 */
static int
vmt_detach(device_t self, int flags)
{
        struct vmt_softc *sc = device_private(self);

        /* Close the TCLO RPC channel only if it was successfully opened. */
        if (sc->sc_tclo_rpc_open)
                vm_rpc_close(&sc->sc_tclo_rpc);

        /* Unregister the sleep/reset/power switches registered at attach. */
        if (sc->sc_smpsw_valid) {
                sysmon_pswitch_unregister(&sc->sc_ev_sleep.ev_smpsw);
                sysmon_pswitch_unregister(&sc->sc_ev_reset.ev_smpsw);
                sysmon_pswitch_unregister(&sc->sc_ev_power.ev_smpsw);
        }

        /*
         * Halt each callout (waiting for any in-flight invocation to
         * finish) before destroying it, as callout(9) requires.
         */
        callout_halt(&sc->sc_tick, NULL);
        callout_destroy(&sc->sc_tick);
        callout_halt(&sc->sc_tclo_tick, NULL);
        callout_destroy(&sc->sc_tclo_tick);
        callout_halt(&sc->sc_clock_sync_tick, NULL);
        callout_destroy(&sc->sc_clock_sync_tick);

        if (sc->sc_rpc_buf)
                kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);

        /* Release the sysctl nodes created at attach time. */
        if (sc->sc_log) {
                sysctl_teardown(&sc->sc_log);
                sc->sc_log = NULL;
        }

        return 0;
}
/*
 * pcppi_detach --
 *	Detach the PC speaker (PPI) device: detach children, silence any
 *	bell in progress under tty_lock, then destroy the bell callout,
 *	its condvar, and the bus-space mapping.
 */
int
pcppi_detach(device_t self, int flags)
{
        int rc;
        struct pcppi_softc *sc = device_private(self);

#if NATTIMER > 0
        /* Unhook from the attimer before anything else goes away. */
        pcppi_detach_speaker(sc);
#endif

        /* Children must detach cleanly before we tear down our state. */
        if ((rc = config_detach_children(sc->sc_dv, flags)) != 0)
                return rc;

        pmf_device_deregister(self);

#if NPCKBD > 0
        /* Remove the keyboard-bell hook installed at attach. */
        pckbd_unhook_bell(pcppi_pckbd_bell, sc);
#endif

        /* Stop any bell currently sounding; bell state is under tty_lock. */
        mutex_spin_enter(&tty_lock);
        pcppi_bell_stop(sc);
        mutex_spin_exit(&tty_lock);

        /* Wait for the bell callout to finish before destroying it. */
        callout_halt(&sc->sc_bell_ch, NULL);
        callout_destroy(&sc->sc_bell_ch);
        cv_destroy(&sc->sc_slp);

        bus_space_unmap(sc->sc_iot, sc->sc_ppi_ioh, sc->sc_size);

        return 0;
}
/*
 * drm_vblank_cleanup --
 *	Free the per-device vblank state.  Safe to call when vblank was
 *	never initialised (dev->vblank == NULL).  On NetBSD the disable
 *	timer is a callout and the per-CRTC condvars and the lock must
 *	be destroyed explicitly.
 */
void
drm_vblank_cleanup(struct drm_device *dev)
{
#if defined(__NetBSD__)
        int i;
#endif  /* defined(__NetBSD__) */

        if (dev->vblank == NULL)
                return;         /* not initialised */

        /* Cancel the pending disable timer before freeing its storage. */
        timeout_del(&dev->vblank->vb_disable_timer);
#if defined(__NetBSD__)
        callout_destroy(&dev->vblank->vb_disable_timer);
#endif  /* defined(__NetBSD__) */

        vblank_disable(dev);

#if defined(__NetBSD__)
        /* Tear down each CRTC's wait condvar, then the vblank lock. */
        for (i = 0; i < dev->vblank->vb_num; i++)
                cv_destroy(&dev->vblank->vb_crtcs[i].condvar);
        mutex_destroy(&dev->vblank->vb_lock);
#endif  /* defined(__NetBSD__) */

        drm_free(dev->vblank);
        dev->vblank = NULL;
}
/*
 * testcall --
 *	Self-test entry point: sets up a mutex/condvar/soft interrupt/
 *	callout, fires the callout, waits for the handler chain to set
 *	test_done, then tears everything back down.  All test state
 *	(test_mutex, test_cv, test_sih, test_ch, test_done) lives at
 *	file scope.
 */
int
testcall(struct lwp *l, void *uap, register_t *retval)
{

        printf("test: initializing\n");
        mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
        cv_init(&test_cv, "testcv");
        test_sih = softint_establish(SOFTINT_MPSAFE | SOFTINT_SERIAL,
            test_softint, NULL);
        callout_init(&test_ch, CALLOUT_MPSAFE);
        callout_setfunc(&test_ch, test_callout, NULL);

        printf("test: firing\n");
        callout_schedule(&test_ch, hz / 10);

        /* Block until test_callout/test_softint signal completion. */
        printf("test: waiting\n");
        mutex_enter(&test_mutex);
        while (!test_done) {
                cv_wait(&test_cv, &test_mutex);
        }
        mutex_exit(&test_mutex);
        printf("test: finished\n");

        /* Teardown in reverse order of initialisation. */
        callout_destroy(&test_ch);
        softint_disestablish(test_sih);
        mutex_destroy(&test_mutex);
        cv_destroy(&test_cv);

        return 0;
}
/*
 * linux_worker_intr --
 *	Callout handler for a Linux-compat delayed work item.  The work
 *	must be in WORK_DELAYED (timer expired normally) or
 *	WORK_DELAYED_CANCELLED (a canceller lost the race with the
 *	callout firing).  Either way the callout itself is finished and
 *	is removed from the queue's delayed list and destroyed.
 */
static void
linux_worker_intr(void *arg)
{
        struct delayed_work *dw = arg;
        struct workqueue_struct *wq;

        linux_work_lock(&dw->work);

        KASSERT((dw->work.w_state == WORK_DELAYED) ||
            (dw->work.w_state == WORK_DELAYED_CANCELLED));

        wq = dw->work.w_wq;
        mutex_enter(&wq->wq_lock);

        /* Queue the work, or return it to idle and alert any cancellers. */
        if (__predict_true(dw->work.w_state == WORK_DELAYED)) {
                dw->work.w_state = WORK_PENDING;
                workqueue_enqueue(dw->work.w_wq->wq_workqueue, &dw->work.w_wk,
                    NULL);
        } else {
                KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);
                dw->work.w_state = WORK_IDLE;
                dw->work.w_wq = NULL;
                /* Wake threads sleeping in cancel waiting for us. */
                cv_broadcast(&wq->wq_cv);
        }

        /* Either way, the callout is done. */
        TAILQ_REMOVE(&dw->work.w_wq->wq_delayed, dw, dw_entry);
        callout_destroy(&dw->dw_callout);

        mutex_exit(&wq->wq_lock);
        linux_work_unlock(&dw->work);
}
/*
 * lpt_detach_subr --
 *	Common detach for lpt(4) front-ends: mark the device not-OK,
 *	remove the soft interrupt, and destroy the wakeup callout.
 *
 *	Fix: callout_destroy(9) requires that the callout be neither
 *	pending nor running; the original destroyed sc_wakeup_ch without
 *	halting it first, racing a wakeup still in flight.  Halt it
 *	(waiting for any running invocation) before destroying.
 */
int
lpt_detach_subr(device_t self, int flags)
{
        struct lpt_softc *sc = device_private(self);

        sc->sc_dev_ok = 0;

        softint_disestablish(sc->sc_sih);

        /* Wait for a pending/running wakeup before tearing it down. */
        callout_halt(&sc->sc_wakeup_ch, NULL);
        callout_destroy(&sc->sc_wakeup_ch);

        return 0;
}
static void filt_timerdetach(struct knote *kn) { callout_t *calloutp; calloutp = (callout_t *)kn->kn_hook; callout_halt(calloutp, NULL); callout_destroy(calloutp); kmem_free(calloutp, sizeof(*calloutp)); atomic_dec_uint(&kq_ncallouts); }
/*
 * fddetach --
 *	Detach a floppy drive: turn the motor off, revoke device nodes,
 *	detach the disk, drain the buffer queue, and destroy the motor
 *	callouts.
 *
 *	Fix: the motor-on/motor-off callouts were destroyed without
 *	first being halted; callout_destroy(9) is only valid on a
 *	callout that is neither pending nor running.  Halt both first.
 */
static int
fddetach(device_t self, int flags)
{
        struct fd_softc *fd = device_private(self);
        int bmaj, cmaj, i, mn;

        fd_motor_off(fd);

        /* locate the major number */
        bmaj = bdevsw_lookup_major(&fd_bdevsw);
        cmaj = cdevsw_lookup_major(&fd_cdevsw);

        /* Nuke the vnodes for any open instances. */
        for (i = 0; i < MAXPARTITIONS; i++) {
                mn = DISKMINOR(device_unit(self), i);
                vdevgone(bmaj, mn, mn, VBLK);
                vdevgone(cmaj, mn, mn, VCHR);
        }

        pmf_device_deregister(self);

#if 0 /* XXX need to undo at detach? */
        fd_set_properties(fd);
#endif
#if NRND > 0
        rnd_detach_source(&fd->rnd_source);
#endif

        disk_detach(&fd->sc_dk);
        disk_destroy(&fd->sc_dk);

        /* Kill off any queued buffers. */
        bufq_drain(fd->sc_q);
        bufq_free(fd->sc_q);

        /* Wait for in-flight motor callouts before destroying them. */
        callout_halt(&fd->sc_motoroff_ch, NULL);
        callout_halt(&fd->sc_motoron_ch, NULL);
        callout_destroy(&fd->sc_motoroff_ch);
        callout_destroy(&fd->sc_motoron_ch);

        return 0;
}
/*
 * Detach a keyboard. To keep track of users of the softc we keep
 * a reference count that's incremented while inside, e.g., read.
 * If the keyboard is active and the reference count is > 0 (0 is the
 * normal state) we post an event and then wait for the process
 * that had the reference to wake us up again. Then we blow away the
 * vnode and return (which will deallocate the softc).
 */
int
wskbd_detach(device_t self, int flags)
{
        struct wskbd_softc *sc = device_private(self);
        struct wseventvar *evar;
        int maj, mn;
        int s;

#if NWSMUX > 0
        /* Tell parent mux we're leaving. */
        if (sc->sc_base.me_parent != NULL)
                wsmux_detach_sc(&sc->sc_base);
#endif

        /* Wait for a running autorepeat callout before destroying it. */
        callout_halt(&sc->sc_repeat_ch, NULL);
        callout_destroy(&sc->sc_repeat_ch);

        if (sc->sc_isconsole) {
                KASSERT(wskbd_console_device == sc);
                wskbd_console_device = NULL;
        }

        pmf_device_deregister(self);

        evar = sc->sc_base.me_evp;
        if (evar != NULL && evar->io != NULL) {
                s = spltty();
                if (--sc->sc_refcnt >= 0) {
                        struct wscons_event event;

                        /* Wake everyone by generating a dummy event. */
                        event.type = 0;
                        event.value = 0;
                        if (wsevent_inject(evar, &event, 1) != 0)
                                wsevent_wakeup(evar);

                        /* Wait for processes to go away (up to 60s). */
                        if (tsleep(sc, PZERO, "wskdet", hz * 60))
                                aprint_error("wskbd_detach: %s didn't detach\n",
                                    device_xname(self));
                }
                splx(s);
        }

        /* locate the major number */
        maj = cdevsw_lookup_major(&wskbd_cdevsw);

        /* Nuke the vnodes for any open instances. */
        mn = device_unit(self);
        vdevgone(maj, mn, mn, VCHR);

        return (0);
}
/*
 * aps_detach --
 *	Detach the ThinkPad accelerometer: stop the refresh callout,
 *	unregister from sysmon envsys, and unmap the register window.
 *
 *	Fix: callout_stop(9) only cancels a pending callout; it does
 *	not wait for one that has already begun executing, so the
 *	callout could still be running when it is destroyed and the
 *	softc freed.  Use callout_halt(9), which waits.
 */
static int
aps_detach(device_t self, int flags)
{
        struct aps_softc *sc = device_private(self);

        /* Halt (cancel and wait for) the refresh callout, then destroy. */
        callout_halt(&sc->sc_callout, NULL);
        callout_destroy(&sc->sc_callout);

        sysmon_envsys_unregister(sc->sc_sme);
        bus_space_unmap(sc->sc_iot, sc->sc_ioh, APS_ADDR_SIZE);

        return 0;
}
/*
 * fdcdetach --
 *	Detach the floppy controller: detach children first, release
 *	ISA DMA resources, then destroy the callouts and sync primitives.
 *
 *	NOTE(review): sc_intr_ch/sc_timo_ch are destroyed without a
 *	prior callout_halt(); this is only safe if detaching the
 *	children is guaranteed to have quiesced both callouts — confirm.
 */
int
fdcdetach(device_t self, int flags)
{
        int rc;
        struct fdc_softc *fdc = device_private(self);

        /* Child drives must go away before controller state does. */
        if ((rc = config_detach_children(self, flags)) != 0)
                return rc;

        pmf_device_deregister(self);

        /* Release the ISA DMA map and channel acquired at attach. */
        isa_dmamap_destroy(fdc->sc_ic, fdc->sc_drq);
        isa_drq_free(fdc->sc_ic, fdc->sc_drq);

        callout_destroy(&fdc->sc_intr_ch);
        callout_destroy(&fdc->sc_timo_ch);

        cv_destroy(&fdc->sc_cv);
        mutex_destroy(&fdc->sc_mtx);

        return 0;
}
/*
 * bthidev_detach --
 *	Detach a Bluetooth HID device: under bt_lock, close the L2CAP
 *	listen and connection channels and halt the reconnect callout;
 *	then detach child devices and free the link-mode sockopt.
 */
static int
bthidev_detach(device_t self, int flags)
{
        struct bthidev_softc *sc = device_private(self);
        struct bthidev *hidev;

        mutex_enter(bt_lock);
        sc->sc_flags = 0;       /* disable reconnecting */

        /* release interrupt listen */
        if (sc->sc_int_l != NULL) {
                l2cap_detach(&sc->sc_int_l);
                sc->sc_int_l = NULL;
        }

        /* release control listen */
        if (sc->sc_ctl_l != NULL) {
                l2cap_detach(&sc->sc_ctl_l);
                sc->sc_ctl_l = NULL;
        }

        /* close interrupt channel */
        if (sc->sc_int != NULL) {
                l2cap_disconnect(sc->sc_int, 0);
                l2cap_detach(&sc->sc_int);
                sc->sc_int = NULL;
        }

        /* close control channel */
        if (sc->sc_ctl != NULL) {
                l2cap_disconnect(sc->sc_ctl, 0);
                l2cap_detach(&sc->sc_ctl);
                sc->sc_ctl = NULL;
        }

        /* Halt the reconnect callout with bt_lock as interlock. */
        callout_halt(&sc->sc_reconnect, bt_lock);
        callout_destroy(&sc->sc_reconnect);

        mutex_exit(bt_lock);

        /* detach children */
        while ((hidev = LIST_FIRST(&sc->sc_list)) != NULL) {
                LIST_REMOVE(hidev, sc_next);
                config_detach(hidev->sc_dev, flags);
        }

        sockopt_destroy(&sc->sc_mode);

        return 0;
}
/*
 * tkn_lwp_finish --
 *	Tear down the task/LWP table: destroy each entry's timeout
 *	callout, free the table, and destroy the space lock.
 *
 *	NOTE(review): the loop runs for i < tskid_lwp_table_maxid; if
 *	"maxid" names the largest valid index (rather than the count),
 *	entry [maxid] is never destroyed — confirm against the
 *	allocation site.
 */
EXPORT ER
tkn_lwp_finish(void)
{
        int i;

        for (i = 0; i < tskid_lwp_table_maxid; i++) {
                callout_destroy(&tskid_lwp_table[i].l_timeout_ch);
        }
        free(tskid_lwp_table, M_KMEM);
        mutex_destroy(&spc_lock);

        return E_OK;
}
/*
 * tda_detach --
 *	Detach the TDA fan controller: unregister the envsys sensors,
 *	halt and destroy the polling callout, then crank both fans to
 *	full speed so the machine stays cool without software control.
 */
int
tda_detach(device_t self, int flags)
{
        struct tda_softc *sc = device_private(self);

        if (sc->sc_sme != NULL)
                sysmon_envsys_destroy(sc->sc_sme);

        /* Wait for a running poll before destroying the callout. */
        callout_halt(&sc->sc_timer, NULL);
        callout_destroy(&sc->sc_timer);

        /* Leave fans at maximum — no driver left to regulate them. */
        sc->sc_cfan_speed = sc->sc_sfan_speed = TDA_FANSPEED_MAX;
        tda_setspeed(sc);

        return 0;
}
/*
 * linux_cancel_delayed_work_callout --
 *	Cancel a delayed work item whose state has already been set to
 *	WORK_DELAYED_CANCELLED by the caller (who holds the work lock).
 *	If wait is true, block until any in-flight callout finishes;
 *	otherwise just try to stop it.  If the callout had not yet
 *	fired, we own the cleanup: unlink it, destroy it, and return
 *	the work to idle.
 */
static void
linux_cancel_delayed_work_callout(struct delayed_work *dw, bool wait)
{
        bool fired_p;

        KASSERT(linux_work_locked(&dw->work));
        KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

        if (wait) {
                /*
                 * We unlock, halt, and then relock, rather than
                 * passing an interlock to callout_halt, for two
                 * reasons:
                 *
                 * (1) The work lock is not a mutex(9), so we can't use it.
                 * (2) The WORK_DELAYED_CANCELLED state serves as an interlock.
                 */
                linux_work_unlock(&dw->work);
                fired_p = callout_halt(&dw->dw_callout, NULL);
                linux_work_lock(&dw->work);
        } else {
                fired_p = callout_stop(&dw->dw_callout);
        }

        /*
         * fired_p means we didn't cancel the callout, so it must have
         * already begun and will clean up after itself.
         *
         * !fired_p means we cancelled it so we have to clean up after
         * it.  Nobody else should have changed the state in that case.
         */
        if (!fired_p) {
                struct workqueue_struct *wq;

                KASSERT(linux_work_locked(&dw->work));
                KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

                wq = dw->work.w_wq;
                mutex_enter(&wq->wq_lock);
                /* Unlink from the delayed list and retire the callout. */
                TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
                callout_destroy(&dw->dw_callout);
                dw->work.w_state = WORK_IDLE;
                dw->work.w_wq = NULL;
                /* Wake any other threads waiting on this cancellation. */
                cv_broadcast(&wq->wq_cv);
                mutex_exit(&wq->wq_lock);
        }
}
/*
 * acpitz_detach --
 *	Detach an ACPI thermal zone: halt the polling callout, remove
 *	the ACPI notify handler, deregister any active-cooling power
 *	resources we referenced, and free the passive list and sensors.
 */
static int
acpitz_detach(device_t self, int flags)
{
        struct acpitz_softc *sc = device_private(self);
        ACPI_HANDLE hdl;
        ACPI_BUFFER al;
        ACPI_STATUS rv;
        int i;

        /* Wait for a running poll before destroying the callout. */
        callout_halt(&sc->sc_callout, NULL);
        callout_destroy(&sc->sc_callout);

        pmf_device_deregister(self);
        acpi_deregister_notify(sc->sc_node);

        /*
         * Although the device itself should not contain any power
         * resources, we have possibly used the resources of active
         * cooling devices. To unregister these, first fetch a fresh
         * active cooling zone, and then detach the resources from
         * the reference handles contained in the cooling zone.
         */
        acpitz_get_zone(self, 0);

        for (i = 0; i < ATZ_NLEVELS; i++) {

                if (sc->sc_zone.al[i].Pointer == NULL)
                        continue;

                al = sc->sc_zone.al[i];
                rv = acpi_eval_reference_handle(al.Pointer, &hdl);

                if (ACPI_SUCCESS(rv))
                        acpi_power_deregister(hdl);

                ACPI_FREE(sc->sc_zone.al[i].Pointer);
        }

        if (sc->sc_psl)
                kmem_free(sc->sc_psl, sc->sc_psl_size);

        if (sc->sc_sme != NULL)
                sysmon_envsys_unregister(sc->sc_sme);

        return 0;
}
int wb_sdmmc_detach(struct wb_softc *wb, int flags) { int rv; if (wb->wb_sdmmc_dev) { rv = config_detach(wb->wb_sdmmc_dev, flags); if (rv) return rv; } wb_sdmmc_disable(wb); callout_halt(&wb->wb_sdmmc_callout, NULL); callout_destroy(&wb->wb_sdmmc_callout); return 0; }
/*
 * gpiopwm_detach --
 *	Detach the GPIO PWM pseudo-device: halt the pulse callout,
 *	drive the pin low, release the pin mapping, and tear down the
 *	sysctl tree.
 */
int
gpiopwm_detach(device_t self, int flags)
{
        struct gpiopwm_softc *sc = device_private(self);

        /* Wait for a running pulse callout before destroying it. */
        callout_halt(&sc->sc_pulse, NULL);
        callout_destroy(&sc->sc_pulse);

        /* Leave the output in a known (low) state. */
        gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_LOW);

        pmf_device_deregister(self);
        gpio_pin_unmap(sc->sc_gpio, &sc->sc_map);

        if (sc->sc_log != NULL) {
                sysctl_teardown(&sc->sc_log);
                sc->sc_log = NULL;
        }

        return 0;
}
/* ARGSUSED */
/*
 * btbc_detach --
 *	Detach the AnyCom BlueCard: disable the hardware, retire the
 *	LED callout, detach the HCI unit, and unconfigure the PCMCIA
 *	function.
 *
 *	Fix: callout_stop(9) does not wait for an LED callout already
 *	executing, so it could still be running when destroyed and the
 *	softc freed.  Use callout_halt(9), which cancels and waits.
 */
static int
btbc_detach(device_t self, int flags)
{
        struct btbc_softc *sc = device_private(self);
        int err = 0;

        pmf_device_deregister(self);
        btbc_disable(sc->sc_dev);

        /* Halt (cancel and wait for) the LED callout, then destroy. */
        callout_halt(&sc->sc_ledch, NULL);
        callout_destroy(&sc->sc_ledch);

        if (sc->sc_unit) {
                hci_detach(sc->sc_unit);
                sc->sc_unit = NULL;
        }

        pcmcia_function_unconfigure(sc->sc_pf);

        return err;
}
/*
 * uyurex_detach --
 *	Detach the YUREX USB device: flag the softc as dying, halt the
 *	delayed-attach callout, unregister the envsys sensors, free the
 *	input buffer, and post the USB detach event.
 */
int
uyurex_detach(device_t self, int flags)
{
        struct uyurex_softc *sc = device_private(self);
        int rv = 0;

        /* Stop new work; readers check sc_dying before proceeding. */
        sc->sc_dying = 1;

        /* Wait for a running delayed-attach callout, then destroy it. */
        callout_halt(&sc->sc_deltach, NULL);
        callout_destroy(&sc->sc_deltach);

        sysmon_envsys_unregister(sc->sc_sme);

        if (sc->sc_ibuf != NULL) {
                free(sc->sc_ibuf, M_USBDEV);
                sc->sc_ibuf = NULL;
        }

        usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
            sc->sc_hdev.sc_dev);

        return (rv);
}
/*
 * btkbd_detach --
 *	Detach the Bluetooth keyboard: retire the autorepeat callout
 *	(when built with raw-keyboard repeat support) and detach the
 *	wskbd child.
 *
 *	Fix: callout_stop(9) followed by KASSERT(!callout_invoking())
 *	merely asserts that the race with a running callout was not
 *	lost — it does not prevent it.  callout_halt(9) cancels the
 *	callout and waits for any in-flight invocation, giving the
 *	guarantee the assertion was hoping for.
 */
static int
btkbd_detach(device_t self, int flags)
{
        struct btkbd_softc *sc = device_private(self);
        int err = 0;

        pmf_device_deregister(self);

#ifdef WSDISPLAY_COMPAT_RAWKBD
#ifdef BTKBD_REPEAT
        /* Cancel and wait for the autorepeat callout, then destroy. */
        callout_halt(&sc->sc_repeat, NULL);
        callout_destroy(&sc->sc_repeat);
#endif
#endif

        if (sc->sc_wskbd != NULL) {
                err = config_detach(sc->sc_wskbd, flags);
                sc->sc_wskbd = NULL;
        }

        return err;
}
/*
 * bthidev_detach --
 *	Detach a Bluetooth HID device (newer l2cap *_pcb API): under
 *	bt_lock, close the L2CAP listen and connection channels and
 *	halt the reconnect callout; then stop the input-processing
 *	kthread, detach child devices, and free the remaining state.
 */
static int
bthidev_detach(device_t self, int flags)
{
        struct bthidev_softc *sc = device_private(self);
        struct bthidev *hidev;

        mutex_enter(bt_lock);
        sc->sc_flags = 0;       /* disable reconnecting */

        /* release interrupt listen */
        if (sc->sc_int_l != NULL) {
                l2cap_detach_pcb(&sc->sc_int_l);
                sc->sc_int_l = NULL;
        }

        /* release control listen */
        if (sc->sc_ctl_l != NULL) {
                l2cap_detach_pcb(&sc->sc_ctl_l);
                sc->sc_ctl_l = NULL;
        }

        /* close interrupt channel */
        if (sc->sc_int != NULL) {
                l2cap_disconnect_pcb(sc->sc_int, 0);
                l2cap_detach_pcb(&sc->sc_int);
                sc->sc_int = NULL;
        }

        /* close control channel */
        if (sc->sc_ctl != NULL) {
                l2cap_disconnect_pcb(sc->sc_ctl, 0);
                l2cap_detach_pcb(&sc->sc_ctl);
                sc->sc_ctl = NULL;
        }

        /* Halt the reconnect callout with bt_lock as interlock. */
        callout_halt(&sc->sc_reconnect, bt_lock);
        callout_destroy(&sc->sc_reconnect);

        mutex_exit(bt_lock);

        pmf_device_deregister(self);

        /* kill off the input processor */
        if (sc->sc_lwp != NULL) {
                mutex_enter(&sc->sc_lock);
                sc->sc_detach = 1;
                cv_signal(&sc->sc_cv);
                mutex_exit(&sc->sc_lock);
                kthread_join(sc->sc_lwp);
                sc->sc_lwp = NULL;
        }

        /* detach children */
        while ((hidev = LIST_FIRST(&sc->sc_list)) != NULL) {
                LIST_REMOVE(hidev, sc_next);
                config_detach(hidev->sc_dev, flags);
        }

        MBUFQ_DRAIN(&sc->sc_inq);
        cv_destroy(&sc->sc_cv);
        mutex_destroy(&sc->sc_lock);
        sockopt_destroy(&sc->sc_mode);

        return 0;
}