/*
 * Disconnected
 *
 * Depending on our state, this could mean several things, but essentially
 * we are lost. If both channels are closed, and we are marked to reconnect,
 * schedule another try otherwise just give up. They will contact us.
 */
static void
bthidev_ctl_disconnected(void *arg, int err)
{
	struct bthidev_softc *sc = arg;

	/* Tear down our end of the control channel, if still attached. */
	if (sc->sc_ctl != NULL) {
		l2cap_detach(&sc->sc_ctl);
		sc->sc_ctl = NULL;
	}

	sc->sc_state = BTHID_CLOSED;

	if (sc->sc_int == NULL) {
		/* Interrupt channel is already gone: fully disconnected. */
		aprint_normal_dev(sc->sc_dev, "disconnected\n");
		sc->sc_flags &= ~BTHID_CONNECTING;

		if (sc->sc_flags & BTHID_RECONNECT)
			/* We initiate reconnects: retry after the backoff. */
			callout_schedule(&sc->sc_reconnect,
			    BTHID_RETRY_INTERVAL * hz);
		else
			/* Device connects to us: wait for a new control link. */
			sc->sc_state = BTHID_WAIT_CTL;
	} else {
		/*
		 * The interrupt channel should have been closed first,
		 * but its potentially unsafe to detach that from here.
		 * Give them a second to do the right thing or let the
		 * callout handle it.
		 */
		callout_schedule(&sc->sc_reconnect, hz);
	}
}
/*
 * Interrupt channel disconnected.  Counterpart of the control-channel
 * handler: detach our end of the interrupt channel and, once the control
 * channel is also gone, either schedule a reconnect attempt or go back
 * to waiting for the device to contact us.
 */
static void
bthidev_int_disconnected(void *arg, int err)
{
	struct bthidev_softc *sc = arg;

	/* Tear down our end of the interrupt channel, if still attached. */
	if (sc->sc_int != NULL) {
		l2cap_detach(&sc->sc_int);
		sc->sc_int = NULL;
	}

	sc->sc_state = BTHID_CLOSED;

	if (sc->sc_ctl == NULL) {
		/* Both channels closed: fully disconnected. */
		aprint_normal_dev(sc->sc_dev, "disconnected\n");
		sc->sc_flags &= ~BTHID_CONNECTING;

		if (sc->sc_flags & BTHID_RECONNECT)
			/* We initiate reconnects: retry after the backoff. */
			callout_schedule(&sc->sc_reconnect,
			    BTHID_RETRY_INTERVAL * hz);
		else
			/* Device connects to us: wait for a new control link. */
			sc->sc_state = BTHID_WAIT_CTL;
	} else {
		/*
		 * The control channel should be closing also, allow
		 * them a chance to do that before we force it.
		 */
		callout_schedule(&sc->sc_reconnect, hz);
	}
}
/*
 * Feed one keyboard event (type = key down/up, value = keycode) into the
 * wskbd layer.  Any pending autorepeat is cancelled first; the event is
 * then either translated to keysyms for an attached display console or
 * delivered raw to /dev/wskbdN listeners, (re)arming autorepeat as
 * configured.
 */
void
wskbd_input(device_t dev, u_int type, int value)
{
	struct wskbd_softc *sc = device_private(dev);
#if NWSDISPLAY > 0
	int num, i;
#endif

	/* A new event always cancels a repeat in progress. */
	if (sc->sc_repeating) {
		sc->sc_repeating = 0;
		callout_stop(&sc->sc_repeat_ch);
	}

	device_active(dev, DVA_HARDWARE);

#if NWSDISPLAY > 0
	/*
	 * If /dev/wskbdN is not connected in event mode translate and
	 * send upstream.
	 */
	if (sc->sc_translating) {
		num = wskbd_translate(sc->id, type, value);
		if (num > 0) {
			if (sc->sc_base.me_dispdv != NULL) {
#ifdef WSDISPLAY_SCROLLSUPPORT
				/* Any key but Print Screen leaves scrollback. */
				if (sc->id->t_symbols[0] != KS_Print_Screen) {
					wsdisplay_scroll(sc->sc_base.
					    me_dispdv, WSDISPLAY_SCROLL_RESET);
				}
#endif
				for (i = 0; i < num; i++)
					wsdisplay_kbdinput(
					    sc->sc_base.me_dispdv,
					    sc->id->t_symbols[i]);
			}

			/* del1 != 0 means autorepeat is enabled. */
			if (sc->sc_keyrepeat_data.del1 != 0) {
				sc->sc_repeating = num;
				callout_schedule(&sc->sc_repeat_ch,
				    mstohz(sc->sc_keyrepeat_data.del1));
			}
		}
		return;
	}
#endif

	wskbd_deliver_event(sc, type, value);

#if defined(WSKBD_EVENT_AUTOREPEAT)
	/* Repeat key presses if set. */
	if (type == WSCONS_EVENT_KEY_DOWN && sc->sc_keyrepeat_data.del1 != 0) {
		sc->sc_repeat_type = type;
		sc->sc_repeat_value = value;
		sc->sc_repeating = 1;
		callout_schedule(&sc->sc_repeat_ch,
		    mstohz(sc->sc_keyrepeat_data.del1));
	}
#endif /* defined(WSKBD_EVENT_AUTOREPEAT) */
}
/*
 * Software-PWM callout: flip the GPIO pin to the opposite level and
 * rearm the callout for the duration of the new level.
 */
static void
gpiopwm_pulse(void *arg)
{
	struct gpiopwm_softc *sc = arg;
	int is_high;

	is_high = (gpio_pin_read(sc->sc_gpio, &sc->sc_map, 0) == GPIO_PIN_HIGH);
	if (is_high) {
		/* On-phase finished: drive low for the off-time. */
		gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_LOW);
		callout_schedule(&sc->sc_pulse, sc->sc_ticks_off);
	} else {
		/* Off-phase finished: drive high for the on-time. */
		gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_HIGH);
		callout_schedule(&sc->sc_pulse, sc->sc_ticks_on);
	}
}
/*
 * Autorepeat callout: redeliver the most recent key while it remains
 * held.  In translating mode the cached keysyms are re-injected into
 * the display console; otherwise the raw event is requeued.  Rearms
 * itself with the inter-repeat interval.
 */
static void
wskbd_repeat(void *v)
{
	struct wskbd_softc *sc = (struct wskbd_softc *)v;
	int s = spltty();

	if (!sc->sc_repeating) {
		/*
		 * race condition: a "key up" event came in when wskbd_repeat()
		 * was already called but not yet spltty()'d
		 */
		splx(s);
		return;
	}
	if (sc->sc_translating) {
		/* deliver keys */
#if NWSDISPLAY > 0
		if (sc->sc_base.me_dispdv != NULL) {
			int i;

			/* sc_repeating holds the number of keysyms cached. */
			for (i = 0; i < sc->sc_repeating; i++)
				wsdisplay_kbdinput(sc->sc_base.me_dispdv,
				    sc->id->t_symbols[i]);
		}
#endif
	} else {
#if defined(WSKBD_EVENT_AUTOREPEAT)
		/* queue event */
		wskbd_deliver_event(sc, sc->sc_repeat_type,
		    sc->sc_repeat_value);
#endif /* defined(WSKBD_EVENT_AUTOREPEAT) */
	}
	/* delN is the repeat interval; del1 was the initial delay. */
	callout_schedule(&sc->sc_repeat_ch, mstohz(sc->sc_keyrepeat_data.delN));
	splx(s);
}
/*
 * Start output on the PROM console tty: drain up to OFBURSTLEN bytes
 * from the output queue, hand them to the firmware, and let the tty
 * layer schedule a restart if more output remains.
 */
static void
pconsstart(struct tty *tp)
{
	struct clist *outq;
	int spl, nbytes;
	uint8_t chunk[OFBURSTLEN];

	spl = spltty();
	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) {
		splx(spl);
		return;
	}
	tp->t_state |= TS_BUSY;
	splx(spl);

	/* Copy a burst out of the queue and write it via the PROM. */
	outq = &tp->t_outq;
	nbytes = q_to_b(outq, chunk, OFBURSTLEN);
	prom_putstr(chunk, nbytes);

	spl = spltty();
	tp->t_state &= ~TS_BUSY;
	if (ttypull(tp)) {
		/* More data queued: arrange a restart on the next tick. */
		tp->t_state |= TS_TIMEOUT;
		callout_schedule(&tp->t_rstrt_ch, 1);
	}
	splx(spl);
}
/*
 * Initialize power-source monitoring: snapshot the current USB/AC
 * status and enable bits, read (and thereby acknowledge) any stale
 * interrupt, warn if hardware interrupts are enabled (unsupported by
 * this driver), and start the polling callout.
 */
static void
tps65217pmic_power_monitor_init(struct tps65217pmic_softc *sc)
{
	uint8_t intr, intrmask, status, ppath;

	intrmask = TPS65217PMIC_INT_USBM | TPS65217PMIC_INT_ACM |
	    TPS65217PMIC_INT_PBM;

	status = tps65217pmic_reg_read(sc, TPS65217PMIC_STATUS);
	ppath = tps65217pmic_reg_read(sc, TPS65217PMIC_PPATH);
	/* acknowledge and disregard whatever interrupt was generated earlier */
	intr = tps65217pmic_reg_read(sc, TPS65217PMIC_INT);

	/* Cache current source status/enable bits for later comparison. */
	sc->sc_usbstatus = status & TPS65217PMIC_STATUS_USBPWR;
	sc->sc_acstatus = status & TPS65217PMIC_STATUS_ACPWR;
	sc->sc_usbenabled = ppath & TPS65217PMIC_PPATH_USB_EN;
	sc->sc_acenabled = ppath & TPS65217PMIC_PPATH_AC_EN;

	if (intr & intrmask)
		aprint_normal_dev(sc->sc_dev,
		    "WARNING: hardware interrupt enabled but not supported");

	/* set up callout to poll for power source changes */
	callout_init(&sc->sc_powerpollco, 0);
	callout_setfunc(&sc->sc_powerpollco, tps65217pmic_power_monitor, sc);

	callout_schedule(&sc->sc_powerpollco, hz);
}
/*
 * Self-test system call: set up a mutex/condvar pair, a soft interrupt
 * and a callout, fire the callout, wait for the handler chain to signal
 * completion, then tear everything down.  Returns 0 on success.
 */
int
testcall(struct lwp *l, void *uap, register_t *retval)
{

	printf("test: initializing\n");
	mutex_init(&test_mutex, MUTEX_DEFAULT, IPL_NONE);
	cv_init(&test_cv, "testcv");
	test_sih = softint_establish(SOFTINT_MPSAFE | SOFTINT_SERIAL,
	    test_softint, NULL);
	callout_init(&test_ch, CALLOUT_MPSAFE);
	callout_setfunc(&test_ch, test_callout, NULL);

	printf("test: firing\n");
	/* Kick off the chain: callout fires in ~1/10 second. */
	callout_schedule(&test_ch, hz / 10);

	printf("test: waiting\n");
	mutex_enter(&test_mutex);
	/* test_done is set by the handler chain; wait for it. */
	while (!test_done) {
		cv_wait(&test_cv, &test_mutex);
	}
	mutex_exit(&test_mutex);
	printf("test: finished\n");

	/* Tear down in reverse order of creation. */
	callout_destroy(&test_ch);
	softint_disestablish(test_sih);
	mutex_destroy(&test_mutex);
	cv_destroy(&test_cv);

	return 0;
}
/*
 * wsdisplay screen-switch entry point.  Record the screen to switch to
 * and the completion callback, then either hand the work to the async
 * drawing thread / switch callout (returning EAGAIN so the caller waits
 * for the callback) or perform the switch synchronously.
 */
static int
vcons_show_screen(void *v, void *cookie, int waitok,
    void (*cb)(void *, int, int), void *cb_arg)
{
	struct vcons_data *vd = v;
	struct vcons_screen *scr;

	scr = cookie;
	/* Requested screen is already displayed: nothing to do. */
	if (scr == vd->active)
		return 0;

	vd->wanted = scr;
	vd->switch_cb = cb;
	vd->switch_cb_arg = cb_arg;
#ifdef VCONS_SWITCH_ASYNC
	/* Wake the drawing thread; it completes the switch. */
	wakeup(&vd->start_drawing);
	return EAGAIN;
#else
	if (cb) {
		/* Caller supplied a callback: defer to the callout. */
		callout_schedule(&vd->switch_callout, 0);
		return EAGAIN;
	}

	vcons_do_switch(vd);
	return 0;
#endif
}
/*
 * Start output on the keyboard/display console tty.  When called from
 * base level (no interrupt priority recorded in the saved PSR), draw
 * to the framebuffer immediately; when called from interrupt level,
 * defer the work to the tty restart callout instead.
 */
static void
kdstart(struct tty *tp)
{
	int s1, s2;

	s1 = splsoftclock();
	s2 = spltty();
	if (tp->t_state & (TS_BUSY|TS_TTSTOP|TS_TIMEOUT))
		goto out;

	if (ttypull(tp)) {
		tp->t_state |= TS_BUSY;
		/* PSR_PIL bits of s1 reveal the priority we were called at. */
		if ((s1 & PSR_PIL) == 0) {
			/* called at level zero - update screen now. */
			splx(s2);
			kd_putfb(tp);
			s2 = spltty();
			tp->t_state &= ~TS_BUSY;
		} else {
			/* called at interrupt level - do it later */
			callout_schedule(&tp->t_rstrt_ch, 0);
		}
	}
out:
	splx(s2);
	splx(s1);
}
/*
 * Periodic link-state poll.  Translate the link bit from the network
 * status register into ifmedia status flags, reset the PHY when the
 * link has just become active, and rearm the poll for 2 seconds out.
 */
void
dme_phy_check_link(void *arg)
{
	struct dme_softc *sc = arg;
	uint32_t media;
	int spl;

	spl = splnet();

	if (dme_read(sc, DM9000_NSR) & DM9000_NSR_LINKST) {
		media = IFM_ETHER | IFM_AVALID | IFM_ACTIVE;
	} else {
		media = IFM_ETHER | IFM_AVALID;
		sc->sc_media_active = IFM_NONE;
	}

	/* Link just transitioned to active: bounce the PHY. */
	if ((media & IFM_ACTIVE) && sc->sc_media_status != media)
		dme_phy_reset(sc);
	sc->sc_media_status = media;

	callout_schedule(&sc->sc_link_callout, mstohz(2000));
	splx(spl);
}
static int vmt_sysctl_update_clock_sync_period(SYSCTLFN_ARGS) { int error, period; struct sysctlnode node; struct vmt_softc *sc; node = *rnode; sc = (struct vmt_softc *)node.sysctl_data; period = sc->sc_clock_sync_period_seconds; node.sysctl_data = . error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error || newp == NULL) return error; if (sc->sc_clock_sync_period_seconds != period) { callout_halt(&sc->sc_clock_sync_tick, NULL); sc->sc_clock_sync_period_seconds = period; if (sc->sc_clock_sync_period_seconds > 0) callout_schedule(&sc->sc_clock_sync_tick, mstohz(sc->sc_clock_sync_period_seconds * 1000)); } return 0; }
/*
 * A timer that regularly polls the blit engine in cases where we don't have interrupts:
 * a) Broken hardware (typically those that don't have any video capture facility).
 * b) Blit abort. The hardware doesn't send an interrupt when a blit is aborted.
 * The timer and hardware IRQ's can and do work in parallel. If the hardware has
 * irqs, it will shorten the latency somewhat.
 */
static void
via_dmablit_timer(void *arg)
{
	drm_via_blitq_t *blitq = (drm_via_blitq_t *)arg;
	struct drm_device *dev = blitq->dev;
	/* Recover the engine index from this queue's array position. */
	int engine = (int)
	    (blitq - ((drm_via_private_t *)dev->dev_private)->blit_queues);

	DRM_DEBUG("Polling timer called for engine %d, jiffies %lu\n", engine,
	    (unsigned long) jiffies);

	via_dmablit_handler(dev, engine, 0);

	if (!callout_pending(&blitq->poll_timer)) {
		/* Not rearmed by the handler: poll again next tick. */
		callout_schedule(&blitq->poll_timer, 1);

		/*
		 * Rerun handler to delete timer if engines are off, and
		 * to shorten abort latency. This is a little nasty.
		 */

		via_dmablit_handler(dev, engine, 0);

	}
}
/* * intr handler */ int wb_sdmmc_intr(struct wb_softc *wb) { uint8_t val; val = wb_read(wb, WB_SD_INTSTS); if (val == 0xff || val == 0x00) return 0; if (wb->wb_sdmmc_dev == NULL) return 1; wb->wb_sdmmc_intsts |= val; if (wb_sdmmc_debug) { char buf[64]; snprintb(buf, sizeof(buf), "\20\1TC\2BUSYEND\3PROGEND\4TIMEOUT" "\5CRC\6FIFO\7CARD\010PENDING", val); REPORT(wb, "WB_SD_INTSTS = %s\n", buf); } if (val & WB_INT_CARD) callout_schedule(&wb->wb_sdmmc_callout, hz / 4); return 1; }
/*
 * Active-sense receive callout.  Once active sensing has started, the
 * sender is expected to keep transmitting; if nothing arrived during a
 * whole period (rcv_quiescent still set), report end-of-file to readers.
 * Otherwise mark the line quiescent and rearm for the next period.
 */
static void
midi_rcv_asense(void *arg)
{
	struct midi_softc *sc;

	sc = arg;

	mutex_enter(sc->lock);
	if (sc->dying || !sc->isopen) {
		mutex_exit(sc->lock);
		return;
	}
	if (sc->rcv_quiescent) {
		/* Nothing arrived all period: signal EOF to readers. */
		sc->rcv_eof = 1;
		sc->rcv_quiescent = 0;
		sc->rcv_expect_asense = 0;
		cv_broadcast(&sc->rchan);
		selnotify(&sc->rsel, 0, NOTE_SUBMIT);
		if (sc->async)
			softint_schedule(sc->sih);
		mutex_exit(sc->lock);
		return;
	}
	/* Arm the quiescence flag; incoming traffic clears it. */
	sc->rcv_quiescent = 1;
	callout_schedule(&sc->rcv_asense_co, MIDI_RCV_ASENSE_PERIOD);
	mutex_exit(sc->lock);
}
/*
 * Active-sense transmit callout: when output has been idle for a full
 * period, send a keep-alive byte (MIDI_ACK).  If the hardware did not
 * take ownership of the transmission (no completion interrupt coming),
 * release the output bus and rearm ourselves for the next period.
 */
static void
midi_xmt_asense(void *arg)
{
	struct midi_softc *sc;
	int error, armed;

	sc = arg;

	mutex_enter(sc->lock);
	/* Skip if output is busy, device is closing, or not open. */
	if (sc->pbus || sc->dying || !sc->isopen) {
		mutex_exit(sc->lock);
		return;
	}
	sc->pbus = 1;

	if (sc->props & MIDI_PROP_OUT_INTR) {
		/* Interrupt-driven: success means an interrupt follows. */
		error = sc->hw_if->output(sc->hw_hdl, MIDI_ACK);
		armed = (error == 0);
	} else {
		/* Polled output: no completion interrupt, never armed. */
		error = sc->hw_if->output(sc->hw_hdl, MIDI_ACK);
		armed = 0;
	}

	if (!armed) {
		/* No interrupt coming: release the bus and rearm. */
		sc->pbus = 0;
		callout_schedule(&sc->xmt_asense_co, MIDI_XMT_ASENSE_PERIOD);
	}

	mutex_exit(sc->lock);
}
/*
 * The interrupt flavor acquires spl and lock once and releases at the end,
 * as it expects to write only one byte or message. The interface convention
 * is that if hw_if->output returns 0, it has initiated transmission and the
 * completion interrupt WILL be forthcoming; if it has not returned 0, NO
 * interrupt will be forthcoming, and if it returns EINPROGRESS it wants
 * another byte right away.
 */
static int
midi_intr_out(struct midi_softc *sc)
{
	struct midi_buffer *mb;
	int error, msglen;
	MIDI_BUF_DECLARE(idx);
	MIDI_BUF_DECLARE(buf);
	int armed = 0;

	KASSERT(mutex_owned(sc->lock));

	error = 0;
	mb = &sc->outbuf;

	MIDI_BUF_CONSUMER_INIT(mb,idx);
	MIDI_BUF_CONSUMER_INIT(mb,buf);

	while (idx_cur != idx_lim) {
		/* Extended interface: hand off one whole message at a time. */
		if (sc->hw_if_ext) {
			error = midi_msg_out(sc, &idx_cur, &idx_lim,
			    &buf_cur, &buf_lim);
			if (!error) /* no EINPROGRESS from extended hw_if */
				armed = 1;
			break;
		}
		/* or, lacking hw_if_ext ... */
		msglen = MB_IDX_LEN(*idx_cur);
		error = sc->hw_if->output(sc->hw_hdl, *buf_cur);
		if (error && error != EINPROGRESS)
			break;
		++buf_cur;
		MIDI_BUF_WRAP(buf);
		--msglen;
		/* Shrink the per-message length record as bytes go out. */
		if (msglen)
			*idx_cur = PACK_MB_IDX(MB_IDX_CAT(*idx_cur),msglen);
		else {
			++idx_cur;
			MIDI_BUF_WRAP(idx);
		}
		if (!error) {
			/* Transmission started; interrupt will continue it. */
			armed = 1;
			break;
		}
		/* else EINPROGRESS: device wants the next byte right away. */
	}

	MIDI_BUF_CONSUMER_WBACK(mb,idx);
	MIDI_BUF_CONSUMER_WBACK(mb,buf);

	if (!armed) {
		/* Nothing in flight: fall back to active-sense timing. */
		sc->pbus = 0;
		callout_schedule(&sc->xmt_asense_co, MIDI_XMT_ASENSE_PERIOD);
	}

	/* Buffer space was freed: wake writers and pollers. */
	cv_broadcast(&sc->wchan);
	selnotify(&sc->wsel, 0, NOTE_SUBMIT);
	if (sc->async) {
		softint_schedule(sc->sih);
	}

	if (error) {
		DPRINTF(("midi_intr_output error %d\n", error));
	}

	return error;
}
static int gpiopwm_set_off(SYSCTLFN_ARGS) { struct sysctlnode node; struct gpiopwm_softc *sc; int val, error; node = *rnode; sc = node.sysctl_data; callout_halt(&sc->sc_pulse, NULL); gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_LOW); node.sysctl_data = &val; val = sc->sc_ticks_off; error = sysctl_lookup(SYSCTLFN_CALL(&node)); if (error || newp == NULL) return error; sc->sc_ticks_off = val; if (sc->sc_ticks_on > 0 && sc->sc_ticks_off > 0) { gpio_pin_write(sc->sc_gpio, &sc->sc_map, 0, GPIO_PIN_HIGH); callout_schedule(&sc->sc_pulse, sc->sc_ticks_on); } return 0; }
void timeout_test_callout_reschedule(int mpsave, bool use_reset) { enum arg argument = HANDLER_NOT_VISITED; struct callout callout; int retval = 0; printf("== Start a callout and reschedule it after some time with %s. mpsave=%d\n", use_reset ? "reset" : "schedule", mpsave); callout_init(&callout, mpsave); retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler, &argument); assert(retval == 0); usleep(TEST_NOT_FIRED_MS * 1000); assert(argument == HANDLER_NOT_VISITED); if(!use_reset) { retval = callout_schedule(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS)); } else { retval = callout_reset(&callout, RTEMS_MILLISECONDS_TO_TICKS(TIMEOUT_MILLISECONDS), timeout_handler, &argument); } assert(retval != 0); usleep(TEST_NOT_FIRED_MS * 1000); assert(argument == HANDLER_NOT_VISITED); usleep(TEST_FIRED_MS * 1000); assert(argument == HANDLER_VISITED); callout_deactivate(&callout); }
/*
 * Start output on the builtin-console tty: copy one buffer's worth out
 * of the output queue to the bicons backend, then request a restart via
 * the tty callout if more output remains queued.
 */
static void
biconsdev_output(struct tty *tp)
{
	char chunk[OBUFSIZ];
	int spl, cnt;

	spl = spltty();
	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) {
		splx(spl);
		return;
	}
	tp->t_state |= TS_BUSY;
	splx(spl);

	cnt = q_to_b(&tp->t_outq, chunk, sizeof(chunk));
	bicons_putn(chunk, cnt);

	spl = spltty();
	tp->t_state &= ~TS_BUSY;
	/* Come back if there's more to do */
	if (ttypull(tp)) {
		tp->t_state |= TS_TIMEOUT;
		callout_schedule(&tp->t_rstrt_ch, 1);
	}
	splx(spl);
}
/*
 * Linux-compat mod_delayed_work(): (re)schedule a delayed work item on
 * the given workqueue.  Returns true when an already-ticking timer was
 * modified, false when the work was newly queued or left untouched.
 * A zero delay bypasses the callout and enqueues the work immediately.
 */
bool
mod_delayed_work(struct workqueue_struct *wq, struct delayed_work *dw,
    unsigned long ticks)
{
	bool timer_modified;

	KASSERT(wq != NULL);

	linux_work_lock(&dw->work);
	switch (dw->work.w_state) {
	case WORK_IDLE:
	case WORK_INVOKED:
		if (ticks == 0) {
			/* Skip the delay and queue it now. */
			dw->work.w_state = WORK_PENDING;
			dw->work.w_wq = wq;
			workqueue_enqueue(wq->wq_workqueue, &dw->work.w_wk,
			    NULL);
		} else {
			/* First delayed use: set up and arm the callout. */
			callout_init(&dw->dw_callout, CALLOUT_MPSAFE);
			callout_reset(&dw->dw_callout, ticks,
			    &linux_worker_intr, dw);
			dw->work.w_state = WORK_DELAYED;
			dw->work.w_wq = wq;
			mutex_enter(&wq->wq_lock);
			TAILQ_INSERT_HEAD(&wq->wq_delayed, dw, dw_entry);
			mutex_exit(&wq->wq_lock);
		}
		timer_modified = false;
		break;
	case WORK_DELAYED:
		/*
		 * Timer is already ticking. Reschedule it.
		 */
		callout_schedule(&dw->dw_callout, ticks);
		timer_modified = true;
		break;
	case WORK_PENDING:
		/* Already queued for execution; nothing to modify. */
		KASSERT(dw->work.w_wq == wq);
		timer_modified = false;
		break;
	case WORK_CANCELLED:
	case WORK_DELAYED_CANCELLED:
		/* XXX Wait for cancellation and then queue? */
		timer_modified = false;
		break;
	default:
		panic("delayed work %p in bad state: %d", dw,
		    (int)dw->work.w_state);
		break;
	}
	linux_work_unlock(&dw->work);

	return timer_modified;
}
/*
 * Periodic callout: refresh the accelerometer sensor readings twice a
 * second.
 */
static void
aps_refresh(void *arg)
{
	struct aps_softc *sc = arg;

	aps_refresh_sensor_data(sc);
	callout_schedule(&sc->sc_callout, (hz) / 2);
}
/*
 * Cyclic APM poll callout: sample the power state once and rearm.
 */
static void
zapm_cyclic(void *v)
{
	struct zapm_softc *const sc = v;

	zapm_poll1(sc, 1);
	callout_schedule(&sc->sc_cyclic_poll, CYCLIC_TIME);
}
/*
 * One-time setup of the kprintf entropy callout: create it MP-safe,
 * point it at kprintf_rnd_callout() and arm it for one second out.
 * Must not be called twice.
 */
void
kprintf_init_callout(void)
{

	KASSERT(!kprintf_inited_callout);

	callout_init(&kprnd_callout, CALLOUT_MPSAFE);
	callout_setfunc(&kprnd_callout, kprintf_rnd_callout, NULL);
	callout_schedule(&kprnd_callout, hz);
	kprintf_inited_callout = true;
}
/*
 * Keyboard scan callout: poll the matrix once and rearm at the
 * configured scan interval.
 */
static void
wzero3kbd_tick(void *arg)
{
	struct wzero3kbd_softc *const sc = arg;

	(void)wzero3kbd_poll1(sc);

	callout_schedule(&sc->sc_keyscan_ch, sc->sc_interval);
}
/*
 * Thermal-zone polling callout: queue a status refresh on the ACPI
 * notify task queue and rearm using the zone's polling frequency
 * (tzp is in tenths of a second, hence the / 10).
 */
static void
acpitz_tick(void *opaque)
{
	device_t dv = opaque;
	struct acpitz_softc *sc = device_private(dv);

	/* Do the (possibly blocking) work in the notify handler thread. */
	(void)AcpiOsExecute(OSL_NOTIFY_HANDLER, acpitz_get_status, dv);

	callout_schedule(&sc->sc_callout, sc->sc_zone.tzp * hz / 10);
}
/*
 * Entropy-harvest callout: pull data from the virtio RNG device every
 * five seconds.
 */
static void
vtrnd_timer(void *xsc)
{
	struct vtrnd_softc *const sc = xsc;

	vtrnd_harvest(sc);
	callout_schedule(&sc->vtrnd_callout, 5 * hz);
}
/*
 * Clock-sync callout: sync the guest clock with the hypervisor and
 * rearm using the configured period (seconds, converted to ticks).
 */
static void
vmt_clock_sync_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;

	vmt_sync_guest_clock(sc);

	callout_schedule(&sc->sc_clock_sync_tick,
	    mstohz(sc->sc_clock_sync_period_seconds * 1000));
}
/*
 * Periodic guest-info callout: refresh the guest information and
 * uptime reported to the host, every 15 seconds.
 */
static void
vmt_tick(void *xarg)
{
	struct vmt_softc *sc = xarg;

	vmt_update_guest_info(sc);
	vmt_update_guest_uptime(sc);

	callout_schedule(&sc->sc_tick, hz * 15);
}
/*
 * LED update callout, run every LEDS_UPDATE_INTERVAL ms.  Recompute the
 * desired pin states from each LED's mode (communication-activity blink
 * or plain blink), then push the new state out over SPI.  If the
 * previous SPI transfer has not completed, skip this round entirely.
 */
static void
mpcsa_leds_timer(void *aux)
{
	int n, s;
	struct mpcsa_leds_softc *sc = aux;
	u_int16_t pins;

	/* Rearm first, so a skipped round still gets a next tick. */
	callout_schedule(&sc->sc_c, mstohz(LEDS_UPDATE_INTERVAL));

	s = splserial();
	if (!(sc->sc_spi_transfer.st_flags & SPI_F_DONE)) {
		/* Previous SPI transfer still in flight: try again later. */
		splx(s);
		return;
	}

	/* The cached pin state is kept in big-endian (wire) order. */
	pins = be16toh(sc->sc_pinstate);
	for (n = 0; n < MPCSA_LEDS_NPINS; n++) {
		switch (sc->sc_leds[n].l_mode) {
		default:
			continue;
		case LMODE_COMM:
			/*
			 * Count down the activity blink; a count of
			 * INFINITE_BLINK toggles forever instead.
			 */
			if (sc->sc_leds[n].l_comm_cnt > 0) {
				if (sc->sc_leds[n].l_comm_cnt < INFINITE_BLINK)
					sc->sc_leds[n].l_comm_cnt--;
				else
					sc->sc_leds[n].l_comm_cnt ^= 1;
			}
			/*
			 * Clear the bit when the LED should show activity
			 * (presumably active-low wiring — confirm with the
			 * board schematic).
			 */
			if ((sc->sc_leds[n].l_conn_cnt > 0) ^
			    (sc->sc_leds[n].l_comm_cnt & 1))
				pins &= ~(1U << n);
			else
				pins |= (1U << n);
			break;
		case LMODE_BLINK:
			/* Toggle when the per-LED countdown expires. */
			if (--sc->sc_leds[n].l_blink_cnt <= 0) {
				pins ^= (1U << n);
				sc->sc_leds[n].l_blink_cnt =
				    sc->sc_leds[n].l_blink_int;
			}
			break;
		}
	}
	/* Convert back to big-endian in place before transmitting. */
	HTOBE16(pins);
	sc->sc_pinstate = pins;
	splx(s);

	spi_transfer_init(&sc->sc_spi_transfer);
	spi_chunk_init(&sc->sc_spi_chunk, 2, (const void *)&sc->sc_pinstate,
	    NULL);
	spi_transfer_add(&sc->sc_spi_transfer, &sc->sc_spi_chunk);
	if (spi_transfer(sc->sc_sh, &sc->sc_spi_transfer) != 0) {
		/* an error occurred! */
	}
}