/*
 * vmt_detach: autoconf detach for the VMware Tools pseudo-device.
 * Tears down attach-time state: TCLO RPC channel, sysmon power
 * switches, the three periodic callouts, the RPC buffer, and the
 * sysctl tree.
 */
static int
vmt_detach(device_t self, int flags)
{
	struct vmt_softc *sc = device_private(self);

	/* Close the TCLO RPC channel only if it was opened. */
	if (sc->sc_tclo_rpc_open)
		vm_rpc_close(&sc->sc_tclo_rpc);

	/* The sleep/reset/power switches are registered as a group. */
	if (sc->sc_smpsw_valid) {
		sysmon_pswitch_unregister(&sc->sc_ev_sleep.ev_smpsw);
		sysmon_pswitch_unregister(&sc->sc_ev_reset.ev_smpsw);
		sysmon_pswitch_unregister(&sc->sc_ev_power.ev_smpsw);
	}

	/* Halt (wait for a firing handler) before destroying each callout. */
	callout_halt(&sc->sc_tick, NULL);
	callout_destroy(&sc->sc_tick);
	callout_halt(&sc->sc_tclo_tick, NULL);
	callout_destroy(&sc->sc_tclo_tick);
	callout_halt(&sc->sc_clock_sync_tick, NULL);
	callout_destroy(&sc->sc_clock_sync_tick);

	if (sc->sc_rpc_buf)
		kmem_free(sc->sc_rpc_buf, VMT_RPC_BUFLEN);

	if (sc->sc_log) {
		sysctl_teardown(&sc->sc_log);
		sc->sc_log = NULL;
	}

	return 0;
}
/*
 * midiclose: character-device close entry point.
 * Waits under sc->lock for buffered output to drain (sc->pbus is set
 * while the transmit path owns the bus), marks the device closed,
 * halts the active-sense callouts with the lock as interlock, and
 * calls the hardware close routine.
 */
static int
midiclose(dev_t dev, int flags, int ifmt, struct lwp *l)
{
	struct midi_softc *sc;
	const struct midi_hw_if *hw;

	/*
	 * NOTE(review): device_lookup_private() can return NULL for an
	 * invalid unit; presumably open guarantees a valid unit here —
	 * confirm against the open path.
	 */
	sc = device_lookup_private(&midi_cd, MIDIUNIT(dev));
	hw = sc->hw_if;

	DPRINTFN(3,("midiclose %p\n", sc));

	mutex_enter(sc->lock);
	/* midi_start_output(sc); anything buffered => pbus already set! */
	while (sc->pbus) {
		DPRINTFN(8,("midiclose sleep ...\n"));
		cv_wait(&sc->wchan, sc->lock);
	}
	sc->isopen = 0;
	/* sc->lock is the callout interlock: halt waits for handlers. */
	callout_halt(&sc->xmt_asense_co, sc->lock);
	callout_halt(&sc->rcv_asense_co, sc->lock);
	hw->close(sc->hw_hdl);
	sc->seqopen = 0;
	sc->seq_md = 0;
	mutex_exit(sc->lock);
	return 0;
}
/*
 * gpiopwm_set_off: sysctl handler for the PWM "ticks off" value.
 * Stops the pulse callout and drives the pin low, then runs
 * sysctl_lookup(); on a successful write, stores the new value and
 * restarts the pulse train when both on and off periods are positive.
 */
static int
gpiopwm_set_off(SYSCTLFN_ARGS)
{
	struct sysctlnode node;
	struct gpiopwm_softc *sc;
	int val, error;

	node = *rnode;
	sc = node.sysctl_data;

	/*
	 * The callout is halted (and the pin forced low) even for a
	 * read-only lookup; a successful write restarts it below.
	 */
	callout_halt(&sc->sc_pulse, NULL);
	gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_LOW);

	node.sysctl_data = &val;
	val = sc->sc_ticks_off;
	error = sysctl_lookup(SYSCTLFN_CALL(&node));
	if (error || newp == NULL)
		return error;	/* error, or read-only access */

	sc->sc_ticks_off = val;
	if (sc->sc_ticks_on > 0 && sc->sc_ticks_off > 0) {
		gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_HIGH);
		callout_schedule(&sc->sc_pulse, sc->sc_ticks_on);
	}
	return 0;
}
static int vmt_sysctl_update_clock_sync_period(SYSCTLFN_ARGS) { int error, period; struct sysctlnode node; struct vmt_softc *sc; node = *rnode; sc = (struct vmt_softc *)node.sysctl_data; period = sc->sc_clock_sync_period_seconds; node.sysctl_data = . error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error || newp == NULL) return error; if (sc->sc_clock_sync_period_seconds != period) { callout_halt(&sc->sc_clock_sync_tick, NULL); sc->sc_clock_sync_period_seconds = period; if (sc->sc_clock_sync_period_seconds > 0) callout_schedule(&sc->sc_clock_sync_tick, mstohz(sc->sc_clock_sync_period_seconds * 1000)); } return 0; }
/*
 * pcppi_detach: detach the PC speaker (PPI) device.  Detaches the
 * attimer-based speaker and child devices first, silences any bell
 * in progress under tty_lock, then tears down the bell callout, the
 * sleep condvar, and the bus space mapping.
 */
int
pcppi_detach(device_t self, int flags)
{
	int rc;
	struct pcppi_softc *sc = device_private(self);

#if NATTIMER > 0
	pcppi_detach_speaker(sc);
#endif

	if ((rc = config_detach_children(sc->sc_dv, flags)) != 0)
		return rc;

	pmf_device_deregister(self);

#if NPCKBD > 0
	pckbd_unhook_bell(pcppi_pckbd_bell, sc);
#endif
	/* pcppi_bell_stop() requires tty_lock held. */
	mutex_spin_enter(&tty_lock);
	pcppi_bell_stop(sc);
	mutex_spin_exit(&tty_lock);

	/* Wait for the bell callout to finish before destroying it. */
	callout_halt(&sc->sc_bell_ch, NULL);
	callout_destroy(&sc->sc_bell_ch);

	cv_destroy(&sc->sc_slp);

	bus_space_unmap(sc->sc_iot, sc->sc_ppi_ioh, sc->sc_size);

	return 0;
}
void test_softint(void *cookie) { printf("l_ncsw = %d\n", (int)curlwp->l_ncsw); callout_halt(&test_ch, NULL); printf("l_ncsw = %d\n", (int)curlwp->l_ncsw); }
/* * Set up the given timer. The value in pt->pt_time.it_value is taken * to be an absolute time for CLOCK_REALTIME/CLOCK_MONOTONIC timers and * a relative time for CLOCK_VIRTUAL/CLOCK_PROF timers. */ void timer_settime(struct ptimer *pt) { struct ptimer *ptn, *pptn; struct ptlist *ptl; KASSERT(mutex_owned(&timer_lock)); if (!CLOCK_VIRTUAL_P(pt->pt_type)) { callout_halt(&pt->pt_ch, &timer_lock); if (timespecisset(&pt->pt_time.it_value)) { /* * Don't need to check tshzto() return value, here. * callout_reset() does it for us. */ callout_reset(&pt->pt_ch, pt->pt_type == CLOCK_MONOTONIC ? tshztoup(&pt->pt_time.it_value) : tshzto(&pt->pt_time.it_value), realtimerexpire, pt); } } else { if (pt->pt_active) { ptn = LIST_NEXT(pt, pt_list); LIST_REMOVE(pt, pt_list); for ( ; ptn; ptn = LIST_NEXT(ptn, pt_list)) timespecadd(&pt->pt_time.it_value, &ptn->pt_time.it_value, &ptn->pt_time.it_value); } if (timespecisset(&pt->pt_time.it_value)) { if (pt->pt_type == CLOCK_VIRTUAL) ptl = &pt->pt_proc->p_timers->pts_virtual; else ptl = &pt->pt_proc->p_timers->pts_prof; for (ptn = LIST_FIRST(ptl), pptn = NULL; ptn && timespeccmp(&pt->pt_time.it_value, &ptn->pt_time.it_value, >); pptn = ptn, ptn = LIST_NEXT(ptn, pt_list)) timespecsub(&pt->pt_time.it_value, &ptn->pt_time.it_value, &pt->pt_time.it_value); if (pptn) LIST_INSERT_AFTER(pptn, pt, pt_list); else LIST_INSERT_HEAD(ptl, pt, pt_list); for ( ; ptn ; ptn = LIST_NEXT(ptn, pt_list)) timespecsub(&ptn->pt_time.it_value, &pt->pt_time.it_value, &ptn->pt_time.it_value); pt->pt_active = 1; } else pt->pt_active = 0; }
/*
 * vaudio_halt_input: stop the record stream.  The running flag is
 * cleared first so a concurrently firing callout can observe it,
 * then the callout is halted (waiting for it to finish).
 */
static int
vaudio_halt_input(void *opaque)
{
	struct vaudio_softc *softc = opaque;

	softc->sc_record.st_running = false;
	callout_halt(&softc->sc_record.st_callout, NULL);

	return 0;
}
static void filt_timerdetach(struct knote *kn) { callout_t *calloutp; calloutp = (callout_t *)kn->kn_hook; callout_halt(calloutp, NULL); callout_destroy(calloutp); kmem_free(calloutp, sizeof(*calloutp)); atomic_dec_uint(&kq_ncallouts); }
/*
 * Detach a keyboard. To keep track of users of the softc we keep
 * a reference count that's incremented while inside, e.g., read.
 * If the keyboard is active and the reference count is > 0 (0 is the
 * normal state) we post an event and then wait for the process
 * that had the reference to wake us up again. Then we blow away the
 * vnode and return (which will deallocate the softc).
 */
int
wskbd_detach(device_t self, int flags)
{
	struct wskbd_softc *sc = device_private(self);
	struct wseventvar *evar;
	int maj, mn;
	int s;

#if NWSMUX > 0
	/* Tell parent mux we're leaving. */
	if (sc->sc_base.me_parent != NULL)
		wsmux_detach_sc(&sc->sc_base);
#endif

	/* Stop autorepeat before the softc goes away. */
	callout_halt(&sc->sc_repeat_ch, NULL);
	callout_destroy(&sc->sc_repeat_ch);

	if (sc->sc_isconsole) {
		KASSERT(wskbd_console_device == sc);
		wskbd_console_device = NULL;
	}

	pmf_device_deregister(self);

	evar = sc->sc_base.me_evp;
	if (evar != NULL && evar->io != NULL) {
		s = spltty();
		if (--sc->sc_refcnt >= 0) {
			struct wscons_event event;

			/* Wake everyone by generating a dummy event. */
			event.type = 0;
			event.value = 0;
			if (wsevent_inject(evar, &event, 1) != 0)
				wsevent_wakeup(evar);

			/* Wait for processes to go away. */
			if (tsleep(sc, PZERO, "wskdet", hz * 60))
				aprint_error("wskbd_detach: %s didn't detach\n",
				    device_xname(self));
		}
		splx(s);
	}

	/* locate the major number */
	maj = cdevsw_lookup_major(&wskbd_cdevsw);

	/* Nuke the vnodes for any open instances. */
	mn = device_unit(self);
	vdevgone(maj, mn, mn, VCHR);

	return (0);
}
/*
 * bthidev_detach: detach the Bluetooth HID device.  Under bt_lock,
 * disables reconnection, releases the L2CAP listen and transport
 * channels, and destroys the reconnect callout; then detaches any
 * child HID devices and frees the link-mode sockopt.
 */
static int
bthidev_detach(device_t self, int flags)
{
	struct bthidev_softc *sc = device_private(self);
	struct bthidev *hidev;

	mutex_enter(bt_lock);
	sc->sc_flags = 0;	/* disable reconnecting */

	/* release interrupt listen */
	if (sc->sc_int_l != NULL) {
		l2cap_detach(&sc->sc_int_l);
		sc->sc_int_l = NULL;
	}

	/* release control listen */
	if (sc->sc_ctl_l != NULL) {
		l2cap_detach(&sc->sc_ctl_l);
		sc->sc_ctl_l = NULL;
	}

	/* close interrupt channel */
	if (sc->sc_int != NULL) {
		l2cap_disconnect(sc->sc_int, 0);
		l2cap_detach(&sc->sc_int);
		sc->sc_int = NULL;
	}

	/* close control channel */
	if (sc->sc_ctl != NULL) {
		l2cap_disconnect(sc->sc_ctl, 0);
		l2cap_detach(&sc->sc_ctl);
		sc->sc_ctl = NULL;
	}

	/* bt_lock is the callout interlock; halt before destroy. */
	callout_halt(&sc->sc_reconnect, bt_lock);
	callout_destroy(&sc->sc_reconnect);
	mutex_exit(bt_lock);

	/* detach children */
	while ((hidev = LIST_FIRST(&sc->sc_list)) != NULL) {
		LIST_REMOVE(hidev, sc_next);
		config_detach(hidev->sc_dev, flags);
	}

	sockopt_destroy(&sc->sc_mode);

	return 0;
}
/*
 * tda_detach: detach the TDA fan controller.  Unregisters the
 * envsys interface, stops the periodic timer, and leaves both fans
 * pinned at full speed as a fail-safe before the driver goes away.
 */
int
tda_detach(device_t self, int flags)
{
	struct tda_softc *sc = device_private(self);

	if (sc->sc_sme != NULL)
		sysmon_envsys_destroy(sc->sc_sme);

	callout_halt(&sc->sc_timer, NULL);
	callout_destroy(&sc->sc_timer);

	/* Fail safe: run both fans at maximum from now on. */
	sc->sc_cfan_speed = TDA_FANSPEED_MAX;
	sc->sc_sfan_speed = TDA_FANSPEED_MAX;
	tda_setspeed(sc);

	return 0;
}
/*
 * linux_cancel_delayed_work_callout: cancel the callout backing a
 * delayed_work that is in the WORK_DELAYED_CANCELLED state.  If
 * `wait' is true, block until a concurrently firing callout has
 * completed; otherwise just try to stop it.  When the callout had
 * not fired, remove the work from the queue's delayed list, destroy
 * the callout, and reset the work to WORK_IDLE.
 */
static void
linux_cancel_delayed_work_callout(struct delayed_work *dw, bool wait)
{
	bool fired_p;

	KASSERT(linux_work_locked(&dw->work));
	KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

	if (wait) {
		/*
		 * We unlock, halt, and then relock, rather than
		 * passing an interlock to callout_halt, for two
		 * reasons:
		 *
		 * (1) The work lock is not a mutex(9), so we can't use it.
		 * (2) The WORK_DELAYED_CANCELLED state serves as an interlock.
		 */
		linux_work_unlock(&dw->work);
		fired_p = callout_halt(&dw->dw_callout, NULL);
		linux_work_lock(&dw->work);
	} else {
		fired_p = callout_stop(&dw->dw_callout);
	}

	/*
	 * fired_p means we didn't cancel the callout, so it must have
	 * already begun and will clean up after itself.
	 *
	 * !fired_p means we cancelled it so we have to clean up after
	 * it.  Nobody else should have changed the state in that case.
	 */
	if (!fired_p) {
		struct workqueue_struct *wq;

		KASSERT(linux_work_locked(&dw->work));
		KASSERT(dw->work.w_state == WORK_DELAYED_CANCELLED);

		wq = dw->work.w_wq;
		mutex_enter(&wq->wq_lock);
		TAILQ_REMOVE(&wq->wq_delayed, dw, dw_entry);
		callout_destroy(&dw->dw_callout);
		dw->work.w_state = WORK_IDLE;
		dw->work.w_wq = NULL;
		cv_broadcast(&wq->wq_cv);
		mutex_exit(&wq->wq_lock);
	}
}
int wb_sdmmc_detach(struct wb_softc *wb, int flags) { int rv; if (wb->wb_sdmmc_dev) { rv = config_detach(wb->wb_sdmmc_dev, flags); if (rv) return rv; } wb_sdmmc_disable(wb); callout_halt(&wb->wb_sdmmc_callout, NULL); callout_destroy(&wb->wb_sdmmc_callout); return 0; }
/*
 * acpitz_detach: detach the ACPI thermal-zone driver.  Stops the
 * polling callout, deregisters notifications, releases power
 * resources referenced by the active-cooling levels, and frees the
 * passive-cooling list and envsys state.
 */
static int
acpitz_detach(device_t self, int flags)
{
	struct acpitz_softc *sc = device_private(self);
	ACPI_HANDLE hdl;
	ACPI_BUFFER al;
	ACPI_STATUS rv;
	int i;

	callout_halt(&sc->sc_callout, NULL);
	callout_destroy(&sc->sc_callout);

	pmf_device_deregister(self);
	acpi_deregister_notify(sc->sc_node);

	/*
	 * Although the device itself should not contain any power
	 * resources, we have possibly used the resources of active
	 * cooling devices. To unregister these, first fetch a fresh
	 * active cooling zone, and then detach the resources from
	 * the reference handles contained in the cooling zone.
	 */
	acpitz_get_zone(self, 0);

	for (i = 0; i < ATZ_NLEVELS; i++) {
		if (sc->sc_zone.al[i].Pointer == NULL)
			continue;

		al = sc->sc_zone.al[i];
		rv = acpi_eval_reference_handle(al.Pointer, &hdl);

		if (ACPI_SUCCESS(rv))
			acpi_power_deregister(hdl);

		ACPI_FREE(sc->sc_zone.al[i].Pointer);
	}

	if (sc->sc_psl)
		kmem_free(sc->sc_psl, sc->sc_psl_size);

	if (sc->sc_sme != NULL)
		sysmon_envsys_unregister(sc->sc_sme);

	return 0;
}
/*
 * gpiopwm_detach: detach the GPIO PWM pseudo-device.  Stops the
 * pulse callout, leaves the pin driven low, releases the pin
 * mapping, and tears down the sysctl tree.
 */
int
gpiopwm_detach(device_t self, int flags)
{
	struct gpiopwm_softc *sc = device_private(self);

	/* Halt before destroy so a firing handler can finish. */
	callout_halt(&sc->sc_pulse, NULL);
	callout_destroy(&sc->sc_pulse);

	/* Park the output low on the way out. */
	gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_LOW);

	pmf_device_deregister(self);
	gpio_pin_unmap(sc->sc_gpio, &sc->sc_map);

	if (sc->sc_log != NULL) {
		sysctl_teardown(&sc->sc_log);
		sc->sc_log = NULL;
	}

	return 0;
}
/*
 * uyurex_detach: detach the YUREX USB device.  Marks the softc as
 * dying, stops the delayed-attach callout, unregisters the envsys
 * sensors, frees the transfer buffer, and reports the detach event.
 */
int
uyurex_detach(device_t self, int flags)
{
	struct uyurex_softc *sc = device_private(self);
	int rv = 0;

	sc->sc_dying = 1;

	callout_halt(&sc->sc_deltach, NULL);
	callout_destroy(&sc->sc_deltach);

	sysmon_envsys_unregister(sc->sc_sme);

	if (sc->sc_ibuf != NULL) {
		free(sc->sc_ibuf, M_USBDEV);
		sc->sc_ibuf = NULL;
	}

	usbd_add_drv_event(USB_EVENT_DRIVER_DETACH, sc->sc_udev,
	    sc->sc_hdev.sc_dev);

	return (rv);
}
void sleepq_enqueue(sleepq_t *sq, wchan_t wchan, const char *wmesg, syncobj_t *sobj) { struct lwp *l = curlwp; #ifndef T2EX if (__predict_false(sobj != &sleep_syncobj || strcmp(wemsg, "callout"))) { #else if (__predict_false(sobj != &sleep_syncobj || (strcmp(wmesg, "callout") != 0 && strcmp(wmesg, "select") != 0 && strcmp(wmesg, "pollsock") != 0))) { #endif panic("sleepq: unsupported enqueue"); } /* * Remove an LWP from a sleep queue if the LWP was deleted while in * the waiting state. */ if ( l->l_sleepq != NULL && (l->l_stat & LSSLEEP) != 0 ) { sleepq_remove(l->l_sleepq, l); } #ifndef T2EX l->l_syncobj = sobj; #endif l->l_wchan = wchan; l->l_sleepq = sq; #ifndef T2EX l->l_wmesg = wmesg; l->l_slptime = 0; #endif l->l_stat = LSSLEEP; #ifndef T2EX l->l_sleeperr = 0; #endif TAILQ_INSERT_TAIL(sq, l, l_sleepchain); } int sleepq_block(int timo, bool hatch) { struct lwp *l = curlwp; int error = 0; //KASSERT(timo == 0 && !hatch); if (timo != 0) { callout_schedule(&l->l_timeout_ch, timo); } #ifdef T2EX if ( l->l_mutex != NULL ) { mutex_exit(l->l_mutex); } #endif mutex_enter(&sq_mtx); while (l->l_wchan) { if ( hatch ) { error = cv_timedwait_sig( &sq_cv, &sq_mtx, timo ); } else { error = cv_timedwait( &sq_cv, &sq_mtx, timo ); } if (error == EINTR) { if (l->l_wchan) { TAILQ_REMOVE(l->l_sleepq, l, l_sleepchain); l->l_wchan = NULL; l->l_sleepq = NULL; } } } mutex_exit(&sq_mtx); #ifdef T2EX l->l_mutex = &spc_lock; #endif if (timo != 0) { /* * Even if the callout appears to have fired, we need to * stop it in order to synchronise with other CPUs. 
*/ if (callout_halt(&l->l_timeout_ch, NULL)) { error = EWOULDBLOCK; } } return error; } #ifdef T2EX lwp_t * sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp) { struct lwp *l; bool found = false; TAILQ_FOREACH(l, sq, l_sleepchain) { if (l->l_wchan == wchan) { found = true; l->l_wchan = NULL; } } if (found) cv_broadcast(&sq_cv); mutex_spin_exit(mp); return NULL; } #else /* * sleepq_wake: * * Wake zero or more LWPs blocked on a single wait channel. */ lwp_t * sleepq_wake(sleepq_t *sq, wchan_t wchan, u_int expected, kmutex_t *mp) { lwp_t *l, *next; int swapin = 0; KASSERT(mutex_owned(mp)); for (l = TAILQ_FIRST(sq); l != NULL; l = next) { KASSERT(l->l_sleepq == sq); KASSERT(l->l_mutex == mp); next = TAILQ_NEXT(l, l_sleepchain); if (l->l_wchan != wchan) continue; swapin |= sleepq_remove(sq, l); if (--expected == 0) break; } mutex_spin_exit(mp); #if 0 /* * If there are newly awakend threads that need to be swapped in, * then kick the swapper into action. */ if (swapin) uvm_kick_scheduler(); #endif return l; }
/*
 * bthidev_detach: detach the Bluetooth HID device (newer _pcb API
 * variant).  Under bt_lock, disables reconnection, releases the
 * L2CAP listen and transport PCBs, and destroys the reconnect
 * callout; then joins the input-processing kthread, detaches child
 * HID devices, and frees the remaining softc resources.
 */
static int
bthidev_detach(device_t self, int flags)
{
	struct bthidev_softc *sc = device_private(self);
	struct bthidev *hidev;

	mutex_enter(bt_lock);
	sc->sc_flags = 0;	/* disable reconnecting */

	/* release interrupt listen */
	if (sc->sc_int_l != NULL) {
		l2cap_detach_pcb(&sc->sc_int_l);
		sc->sc_int_l = NULL;
	}

	/* release control listen */
	if (sc->sc_ctl_l != NULL) {
		l2cap_detach_pcb(&sc->sc_ctl_l);
		sc->sc_ctl_l = NULL;
	}

	/* close interrupt channel */
	if (sc->sc_int != NULL) {
		l2cap_disconnect_pcb(sc->sc_int, 0);
		l2cap_detach_pcb(&sc->sc_int);
		sc->sc_int = NULL;
	}

	/* close control channel */
	if (sc->sc_ctl != NULL) {
		l2cap_disconnect_pcb(sc->sc_ctl, 0);
		l2cap_detach_pcb(&sc->sc_ctl);
		sc->sc_ctl = NULL;
	}

	/* bt_lock is the callout interlock; halt before destroy. */
	callout_halt(&sc->sc_reconnect, bt_lock);
	callout_destroy(&sc->sc_reconnect);
	mutex_exit(bt_lock);

	pmf_device_deregister(self);

	/* kill off the input processor */
	if (sc->sc_lwp != NULL) {
		mutex_enter(&sc->sc_lock);
		sc->sc_detach = 1;
		cv_signal(&sc->sc_cv);
		mutex_exit(&sc->sc_lock);
		kthread_join(sc->sc_lwp);
		sc->sc_lwp = NULL;
	}

	/* detach children */
	while ((hidev = LIST_FIRST(&sc->sc_list)) != NULL) {
		LIST_REMOVE(hidev, sc_next);
		config_detach(hidev->sc_dev, flags);
	}

	MBUFQ_DRAIN(&sc->sc_inq);
	cv_destroy(&sc->sc_cv);
	mutex_destroy(&sc->sc_lock);
	sockopt_destroy(&sc->sc_mode);

	return 0;
}