void
bthidev_int_disconnected(void *arg, int err)
{
	struct bthidev_softc *sc = arg;

	if (sc->sc_int != NULL) {
		l2cap_detach(&sc->sc_int);
		sc->sc_int = NULL;
	}

	sc->sc_state = BTHID_CLOSED;

	if (sc->sc_ctl == NULL) {
		printf("%s: disconnected\n", sc->sc_btdev.sc_dev.dv_xname);
		sc->sc_flags &= ~BTHID_CONNECTING;

		if (sc->sc_flags & BTHID_RECONNECT)
			timeout_add_sec(&sc->sc_reconnect,
			    BTHID_RETRY_INTERVAL);
		else
			sc->sc_state = BTHID_WAIT_CTL;
	} else {
		/*
		 * The control channel should be closing also, allow
		 * them a chance to do that before we force it.
		 */
		timeout_add_sec(&sc->sc_reconnect, 1);
	}
}
/*
 * Disconnected
 *
 * Depending on our state, this could mean several things, but essentially
 * we are lost. If both channels are closed and we are marked to reconnect,
 * schedule another try; otherwise just give up. They will contact us.
 */
void
bthidev_ctl_disconnected(void *arg, int err)
{
	struct bthidev_softc *sc = arg;

	if (sc->sc_ctl != NULL) {
		l2cap_detach(&sc->sc_ctl);
		sc->sc_ctl = NULL;
	}

	sc->sc_state = BTHID_CLOSED;

	if (sc->sc_int == NULL) {
		printf("%s: disconnected\n", sc->sc_btdev.sc_dev.dv_xname);
		sc->sc_flags &= ~BTHID_CONNECTING;

		if (sc->sc_flags & BTHID_RECONNECT)
			timeout_add_sec(&sc->sc_reconnect,
			    BTHID_RETRY_INTERVAL);
		else
			sc->sc_state = BTHID_WAIT_CTL;
	} else {
		/*
		 * The interrupt channel should have been closed first,
		 * but it's potentially unsafe to detach that from here.
		 * Give them a second to do the right thing or let the
		 * callout handle it.
		 */
		timeout_add_sec(&sc->sc_reconnect, 1);
	}
}
int
octeon_eth_init(struct ifnet *ifp)
{
	struct octeon_eth_softc *sc = ifp->if_softc;

	/* XXX don't disable commonly used parts!!! XXX */
	if (sc->sc_init_flag == 0) {
		/* Cancel any pending I/O. */
		octeon_eth_stop(ifp, 0);

		/* Initialize the device. */
		octeon_eth_configure(sc);

		cn30xxpko_enable(sc->sc_pko);
		cn30xxipd_enable(sc->sc_ipd);

		sc->sc_init_flag = 1;
	} else {
		cn30xxgmx_port_enable(sc->sc_gmx_port, 1);
	}
	octeon_eth_mediachange(ifp);

	cn30xxgmx_set_filter(sc->sc_gmx_port);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
	timeout_add_sec(&sc->sc_tick_free_ch, 1);

	SET(ifp->if_flags, IFF_RUNNING);
	ifq_clr_oactive(&ifp->if_snd);

	return 0;
}
void
fdfinish(struct fd_softc *fd, struct buf *bp)
{
	struct fdc_softc *fdc = (void *)fd->sc_dev.dv_parent;

	splassert(IPL_BIO);

	fd->sc_skip = 0;
	fd->sc_bp = bufq_dequeue(&fd->sc_bufq);

	/*
	 * Move this drive to the end of the queue to give others a `fair'
	 * chance. We only force a switch if N operations are completed while
	 * another drive is waiting to be serviced, since there is a long motor
	 * startup delay whenever we switch.
	 */
	if (TAILQ_NEXT(fd, sc_drivechain) != NULL && ++fd->sc_ops >= 8) {
		fd->sc_ops = 0;
		TAILQ_REMOVE(&fdc->sc_link.fdlink.sc_drives, fd, sc_drivechain);
		if (fd->sc_bp != NULL) {
			TAILQ_INSERT_TAIL(&fdc->sc_link.fdlink.sc_drives, fd,
			    sc_drivechain);
		}
	}

	biodone(bp);
	/* turn off motor 5s from now */
	timeout_add_sec(&fd->fd_motor_off_to, 5);
	fdc->sc_state = DEVIDLE;
}
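/*
 * The fd_motor_off_to handler armed above is not part of this listing.
 * What follows is only a minimal sketch of the usual shape of such a
 * handler, assuming hypothetical FD_MOTOR/FD_MOTOR_WAIT flags and an
 * fd_set_motor() helper: at splbio, clear the motor state and switch
 * the drive motor off.
 */
void
fd_motor_off(void *arg)
{
	struct fd_softc *fd = arg;
	int s;

	s = splbio();
	/* Assumed flags tracking the motor state on this drive. */
	fd->sc_flags &= ~(FD_MOTOR | FD_MOTOR_WAIT);
	/* Assumed helper that programs the controller's motor bits. */
	fd_set_motor((struct fdc_softc *)fd->sc_dev.dv_parent, 0);
	splx(s);
}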
void
octrng_attach(struct device *parent, struct device *self, void *aux)
{
	struct octrng_softc *sc = (void *)self;
	uint64_t control_reg;

	sc->sc_io = aux;
	sc->sc_iot = sc->sc_io->aa_bust;

	if (bus_space_map(sc->sc_iot, OCTEON_RNG_BASE, OCTRNG_MAP_SIZE, 0,
	    &sc->sc_ioh)) {
		printf(": can't map registers");
	}

	control_reg = octeon_xkphys_read_8(OCTRNG_CONTROL_ADDR);
	control_reg |= (OCTRNG_ENABLE_OUTPUT | OCTRNG_ENABLE_ENTROPY);
	octeon_xkphys_write_8(OCTRNG_CONTROL_ADDR, control_reg);

	timeout_set(&sc->sc_to, octrng_rnd, sc);
	timeout_add_sec(&sc->sc_to, 5);

	printf("\n");
}
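/*
 * The octrng_rnd callback registered above is not included in this
 * listing.  A minimal sketch of such a poller, assuming a hypothetical
 * OCTRNG_ENTROPY_REG register offset and the add_true_randomness()
 * helper seen in viornd_vq_done below: read the entropy register, feed
 * it to the kernel randomness pool, and re-arm the timeout.
 */
void
octrng_rnd(void *arg)
{
	struct octrng_softc *sc = arg;
	uint64_t value;

	/* Read 64 bits of hardware entropy (register offset assumed). */
	value = bus_space_read_8(sc->sc_iot, sc->sc_ioh, OCTRNG_ENTROPY_REG);

	/* Mix it into the entropy pool. */
	add_true_randomness((int)value);

	/* Poll again in a second. */
	timeout_add_sec(&sc->sc_to, 1);
}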
int
viornd_vq_done(struct virtqueue *vq)
{
	struct virtio_softc *vsc = vq->vq_owner;
	struct viornd_softc *sc = (struct viornd_softc *)vsc->sc_child;
	int slot, len, i;

	if (virtio_dequeue(vsc, vq, &slot, &len) != 0)
		return 0;

	bus_dmamap_sync(vsc->sc_dmat, sc->sc_dmamap, 0, VIORND_BUFSIZE,
	    BUS_DMASYNC_POSTREAD);
	if (len > VIORND_BUFSIZE) {
		printf("%s: inconsistent descriptor length %d > %d\n",
		    sc->sc_dev.dv_xname, len, VIORND_BUFSIZE);
		goto out;
	}

#if VIORND_DEBUG
	printf("%s: got %d bytes of entropy\n", __func__, len);
#endif

	for (i = 0; (i + 1) * sizeof(int) <= len; i++)
		add_true_randomness(sc->sc_buf[i]);

	if (sc->sc_interval)
		timeout_add_sec(&sc->sc_tick, sc->sc_interval);

out:
	virtio_dequeue_commit(vq, slot);
	return 1;
}
void
ieee80211_node_cache_timeout(void *arg)
{
	struct ieee80211com *ic = arg;

	ieee80211_clean_nodes(ic, 1);
	timeout_add_sec(&ic->ic_node_cache_timeout, IEEE80211_CACHE_WAIT);
}
int
pckbcintr_internal(struct pckbc_internal *t, struct pckbc_softc *sc)
{
	u_char stat;
	pckbc_slot_t slot;
	struct pckbc_slotdata *q;
	int served = 0, data;

	/* reschedule timeout further into the idle times */
	if (timeout_pending(&t->t_poll))
		timeout_add_sec(&t->t_poll, 1);

	for (;;) {
		stat = bus_space_read_1(t->t_iot, t->t_ioh_c, 0);
		if (!(stat & KBS_DIB))
			break;

		served = 1;

		slot = (t->t_haveaux && (stat & KBS_AUXDATA)) ?
		    PCKBC_AUX_SLOT : PCKBC_KBD_SLOT;
		q = t->t_slotdata[slot];

		if (!q) {
			/* XXX do something for live insertion? */
#ifdef PCKBCDEBUG
			printf("pckbcintr: no dev for slot %d\n", slot);
#endif
			KBD_DELAY;
			(void) bus_space_read_1(t->t_iot, t->t_ioh_d, 0);
			continue;
		}

		if (q->polling)
			break;	/* pckbc_poll_data() will get it */

		KBD_DELAY;
		data = bus_space_read_1(t->t_iot, t->t_ioh_d, 0);

		if (CMD_IN_QUEUE(q) && pckbc_cmdresponse(t, slot, data))
			continue;

		if (sc != NULL) {
			if (sc->inputhandler[slot])
				(*sc->inputhandler[slot])(sc->inputarg[slot],
				    data);
#ifdef PCKBCDEBUG
			else
				printf("pckbcintr: slot %d lost %d\n",
				    slot, data);
#endif
		}
	}

	return (served);
}
void
__wdstart(struct wd_softc *wd, struct buf *bp)
{
	struct disklabel *lp;
	u_int64_t nsecs;

	lp = wd->sc_dk.dk_label;
	wd->sc_wdc_bio.blkno = DL_BLKTOSEC(lp, bp->b_blkno +
	    DL_SECTOBLK(lp,
	    DL_GETPOFFSET(&lp->d_partitions[DISKPART(bp->b_dev)])));
	wd->sc_wdc_bio.blkdone = 0;
	wd->sc_bp = bp;
	/*
	 * If we're retrying, retry in single-sector mode. This will give us
	 * the sector number of the problem, and will eventually allow the
	 * transfer to succeed.
	 */
	if (wd->retries >= WDIORETRIES_SINGLE)
		wd->sc_wdc_bio.flags = ATA_SINGLE;
	else
		wd->sc_wdc_bio.flags = 0;
	nsecs = howmany(bp->b_bcount, lp->d_secsize);
	if ((wd->sc_flags & WDF_LBA48) &&
	    /* use LBA48 only if really needed */
	    ((wd->sc_wdc_bio.blkno + nsecs - 1 >= LBA48_THRESHOLD) ||
	    (nsecs > 0xff)))
		wd->sc_wdc_bio.flags |= ATA_LBA48;
	if (wd->sc_flags & WDF_LBA)
		wd->sc_wdc_bio.flags |= ATA_LBA;
	if (bp->b_flags & B_READ)
		wd->sc_wdc_bio.flags |= ATA_READ;
	wd->sc_wdc_bio.bcount = bp->b_bcount;
	wd->sc_wdc_bio.databuf = bp->b_data;
	wd->sc_wdc_bio.wd = wd;
	/* Instrumentation. */
	disk_busy(&wd->sc_dk);
	switch (wdc_ata_bio(wd->drvp, &wd->sc_wdc_bio)) {
	case WDC_TRY_AGAIN:
		timeout_add_sec(&wd->sc_restart_timeout, 1);
		break;
	case WDC_QUEUED:
		break;
	case WDC_COMPLETE:
		/*
		 * This code is never executed because we never set
		 * the ATA_POLL flag above.
		 */
#if 0
		if (wd->sc_wdc_bio.flags & ATA_POLL)
			wddone(wd);
#endif
		break;
	default:
		panic("__wdstart: bad return code from wdc_ata_bio()");
	}
}
void
pckbc_poll(void *v)
{
	struct pckbc_internal *t = v;
	int s;

	s = spltty();
	(void)pckbcintr_internal(t, t->t_sc);
	timeout_add_sec(&t->t_poll, 1);
	splx(s);
}
void
nep_tick(void *arg)
{
	struct nep_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
void
cpsw_tick(void *arg)
{
	struct cpsw_softc *sc = arg;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick, 1);
}
void
bmac_mii_tick(void *v)
{
	struct bmac_softc *sc = v;
	int s;

	s = splnet();
	mii_tick(&sc->sc_mii);
	splx(s);

	timeout_add_sec(&sc->sc_tick_ch, 1);
}
void
drm_vblank_put(struct drm_device *dev, int crtc)
{
	mtx_enter(&dev->vblank->vb_lock);
	/* Last user schedules disable */
	DPRINTF("%s: %d refs = %d\n", __func__, crtc,
	    dev->vblank->vb_crtcs[crtc].vbl_refs);
	KASSERT(dev->vblank->vb_crtcs[crtc].vbl_refs > 0);
	if (--dev->vblank->vb_crtcs[crtc].vbl_refs == 0)
		timeout_add_sec(&dev->vblank->vb_disable_timer, 5);
	mtx_leave(&dev->vblank->vb_lock);
}
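/*
 * The reference-acquire side of this pattern is not part of this
 * listing.  A minimal sketch, assuming a hypothetical vbl_enabled flag
 * and an enable_vblank() driver hook: the first drm_vblank_get() turns
 * the interrupt on, and only when the last reference is dropped does
 * drm_vblank_put() above schedule the delayed disable.
 */
int
drm_vblank_get(struct drm_device *dev, int crtc)
{
	int ret = 0;

	mtx_enter(&dev->vblank->vb_lock);
	if (dev->vblank->vb_crtcs[crtc].vbl_refs++ == 0 &&
	    dev->vblank->vb_crtcs[crtc].vbl_enabled == 0) {
		/* First user enables the interrupt (driver hook assumed). */
		if ((ret = dev->driver->enable_vblank(dev, crtc)) == 0)
			dev->vblank->vb_crtcs[crtc].vbl_enabled = 1;
		else
			dev->vblank->vb_crtcs[crtc].vbl_refs--;
	}
	mtx_leave(&dev->vblank->vb_lock);

	return (ret);
}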
/*
 * octeon_eth_tick_misc
 *
 * => collect statistics
 * => check link status
 * => called at softclock
 */
void
octeon_eth_tick_misc(void *arg)
{
	struct octeon_eth_softc *sc = arg;
	struct ifnet *ifp;
	u_quad_t iqdrops, delta;
	int s;

	s = splnet();

	ifp = &sc->sc_arpcom.ac_if;

	iqdrops = ifp->if_iqdrops;
	cn30xxgmx_stats(sc->sc_gmx_port);
#ifdef OCTEON_ETH_DEBUG
	delta = ifp->if_iqdrops - iqdrops;
	printf("%s: %qu packets dropped at GMX FIFO\n",
	    ifp->if_xname, delta);
#endif
	cn30xxpip_stats(sc->sc_pip, ifp, sc->sc_port);
	delta = ifp->if_iqdrops - iqdrops;
#ifdef OCTEON_ETH_DEBUG
	printf("%s: %qu packets dropped at PIP + GMX FIFO\n",
	    ifp->if_xname, delta);
#endif

	mii_tick(&sc->sc_mii);

#ifdef OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC
	if (sc->sc_gmx_port->sc_proc_nibble_by_soft &&
	    sc->sc_gmx_port->sc_even_nibble_cnt > PROC_NIBBLE_SOFT_THRESHOLD) {
#ifdef OCTEON_ETH_DEBUG
		log(LOG_DEBUG, "%s: even nibble preamble count %d\n",
		    sc->sc_dev.dv_xname, sc->sc_gmx_port->sc_even_nibble_cnt);
#endif
		if (OCTEON_ETH_FIXUP_ODD_NIBBLE_MODEL_P(sc) &&
		    OCTEON_ETH_FIXUP_ODD_NIBBLE_DYNAMIC_SPEED_P(
		    sc->sc_gmx_port, ifp)) {
			log(LOG_NOTICE,
			    "%s: the preamble processing switched to hardware\n",
			    sc->sc_dev.dv_xname);
		}
		sc->sc_gmx_port->sc_proc_nibble_by_soft = 0;
		octeon_eth_mii_statchg((struct device *)sc);
		sc->sc_gmx_port->sc_even_nibble_cnt = 0;
	}
#endif
	splx(s);

	timeout_add_sec(&sc->sc_tick_misc_ch, 1);
}
/*
 * Degrade the sensor state if we received no EndRun string for more than
 * TRUSTTIME seconds.
 */
void
endrun_timeout(void *xnp)
{
	struct endrun *np = xnp;

	if (np->time.status == SENSOR_S_OK) {
		np->time.status = SENSOR_S_WARN;
		/*
		 * further degrade in TRUSTTIME seconds if no new valid EndRun
		 * strings are received.
		 */
		timeout_add_sec(&np->endrun_tout, TRUSTTIME);
	} else
		np->time.status = SENSOR_S_CRIT;
}
void
pckbc_set_inputhandler(pckbc_tag_t self, pckbc_slot_t slot,
    pckbc_inputfcn func, void *arg, char *name)
{
	struct pckbc_internal *t = (struct pckbc_internal *)self;
	struct pckbc_softc *sc = t->t_sc;

	if (slot >= PCKBC_NSLOTS)
		panic("pckbc_set_inputhandler: bad slot %d", slot);

	sc->inputhandler[slot] = func;
	sc->inputarg[slot] = arg;
	sc->subname[slot] = name;

	if (pckbc_console && slot == PCKBC_KBD_SLOT)
		timeout_add_sec(&t->t_poll, 1);
}
int
comclose(dev_t dev, int flag, int mode, struct proc *p)
{
	int unit = DEVUNIT(dev);
	struct com_softc *sc = com_cd.cd_devs[unit];
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tty *tp = sc->sc_tty;
	int s;

#ifdef COM_CONSOLE
	/* XXX This is for cons.c. */
	if (!ISSET(tp->t_state, TS_ISOPEN))
		return 0;
#endif

	if (sc->sc_swflags & COM_SW_DEAD)
		return 0;

	(*linesw[tp->t_line].l_close)(tp, flag, p);
	s = spltty();
	if (ISSET(tp->t_state, TS_WOPEN)) {
		/* tty device is waiting for carrier; drop dtr then re-raise */
		CLR(sc->sc_mcr, MCR_DTR | MCR_RTS);
		bus_space_write_1(iot, ioh, com_mcr, sc->sc_mcr);
		timeout_add_sec(&sc->sc_dtr_tmo, 2);
	} else {
		/* no one else waiting; turn off the uart */
		compwroff(sc);
	}
	CLR(tp->t_state, TS_BUSY | TS_FLUSH);

	sc->sc_cua = 0;
	splx(s);
	ttyclose(tp);

#ifdef COM_CONSOLE
#ifdef notyet /* XXXX */
	if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
		ttyfree(tp);
		sc->sc_tty = 0;
	}
#endif
#endif
	return 0;
}
int
nep_init(struct ifnet *ifp)
{
	struct nep_softc *sc = (struct nep_softc *)ifp->if_softc;
	int s;

	s = splnet();

	timeout_add_sec(&sc->sc_tick_ch, 1);

	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;
	ifp->if_timer = 0;

	splx(s);

	return 0;
}
void
pckbc_slot_enable(pckbc_tag_t self, pckbc_slot_t slot, int on)
{
	struct pckbc_internal *t = (struct pckbc_internal *)self;
	struct pckbc_portcmd *cmd;

	cmd = &pckbc_portcmd[slot];

	if (!pckbc_send_cmd(t->t_iot, t->t_ioh_c,
	    on ? cmd->cmd_en : cmd->cmd_dis))
		printf("pckbc_slot_enable(%d) failed\n", on);

	if (slot == PCKBC_KBD_SLOT) {
		if (on)
			timeout_add_sec(&t->t_poll, 1);
		else
			timeout_del(&t->t_poll);
	}
}
void
ieee80211_inact_timeout(void *arg)
{
	struct ieee80211com *ic = arg;
	struct ieee80211_node *ni, *next_ni;
	int s;

	s = splnet();
	for (ni = RB_MIN(ieee80211_tree, &ic->ic_tree);
	    ni != NULL; ni = next_ni) {
		next_ni = RB_NEXT(ieee80211_tree, &ic->ic_tree, ni);
		if (ni->ni_refcnt > 0)
			continue;
		if (ni->ni_inact < IEEE80211_INACT_MAX)
			ni->ni_inact++;
	}
	splx(s);

	timeout_add_sec(&ic->ic_inact_timeout, IEEE80211_INACT_WAIT);
}
/*
 * Degrade the sensor state if we received no NMEA sentences for more than
 * TRUSTTIME seconds.
 */
void
nmea_timeout(void *xnp)
{
	struct nmea *np = xnp;

	np->signal.value = 0;
	np->signal.status = SENSOR_S_CRIT;

	if (np->time.status == SENSOR_S_OK) {
		np->time.status = SENSOR_S_WARN;
		np->latitude.status = SENSOR_S_WARN;
		np->longitude.status = SENSOR_S_WARN;
		/*
		 * further degrade in TRUSTTIME seconds if no new valid NMEA
		 * sentences are received.
		 */
		timeout_add_sec(&np->nmea_tout, TRUSTTIME);
	} else {
		np->time.status = SENSOR_S_CRIT;
		np->latitude.status = SENSOR_S_CRIT;
		np->longitude.status = SENSOR_S_CRIT;
	}
}
void
smsc_init(void *xsc)
{
	struct smsc_softc *sc = xsc;
	struct ifnet *ifp = &sc->sc_ac.ac_if;
	struct smsc_chain *c;
	usbd_status err;
	int s, i;

	s = splnet();

	/* Cancel pending I/O */
	smsc_stop(sc);

	/* Reset the ethernet interface. */
	smsc_reset(sc);

	/* Init RX ring. */
	if (smsc_rx_list_init(sc) == ENOBUFS) {
		printf("%s: rx list init failed\n", sc->sc_dev.dv_xname);
		splx(s);
		return;
	}

	/* Init TX ring. */
	if (smsc_tx_list_init(sc) == ENOBUFS) {
		printf("%s: tx list init failed\n", sc->sc_dev.dv_xname);
		splx(s);
		return;
	}

	/* Program promiscuous mode and multicast filters. */
	smsc_iff(sc);

	/* Open RX and TX pipes. */
	err = usbd_open_pipe(sc->sc_iface, sc->sc_ed[SMSC_ENDPT_RX],
	    USBD_EXCLUSIVE_USE, &sc->sc_ep[SMSC_ENDPT_RX]);
	if (err) {
		printf("%s: open rx pipe failed: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(err));
		splx(s);
		return;
	}

	err = usbd_open_pipe(sc->sc_iface, sc->sc_ed[SMSC_ENDPT_TX],
	    USBD_EXCLUSIVE_USE, &sc->sc_ep[SMSC_ENDPT_TX]);
	if (err) {
		printf("%s: open tx pipe failed: %s\n",
		    sc->sc_dev.dv_xname, usbd_errstr(err));
		splx(s);
		return;
	}

	/* Start up the receive pipe. */
	for (i = 0; i < SMSC_RX_LIST_CNT; i++) {
		c = &sc->sc_cdata.rx_chain[i];
		usbd_setup_xfer(c->sc_xfer, sc->sc_ep[SMSC_ENDPT_RX],
		    c, c->sc_buf, sc->sc_bufsz,
		    USBD_SHORT_XFER_OK | USBD_NO_COPY,
		    USBD_NO_TIMEOUT, smsc_rxeof);
		usbd_transfer(c->sc_xfer);
	}

	/* TCP/UDP checksum offload engines. */
	smsc_sethwcsum(sc);

	/* Indicate we are up and running. */
	ifp->if_flags |= IFF_RUNNING;
	ifp->if_flags &= ~IFF_OACTIVE;

	timeout_add_sec(&sc->sc_stat_ch, 1);

	splx(s);
}
void
zstty_rxsoft(struct zstty_softc *zst, struct tty *tp)
{
	struct zs_chanstate *cs = zst->zst_cs;
	int (*rint)(int, struct tty *) = linesw[tp->t_line].l_rint;
	uint8_t *get, *end;
	u_int cc, scc;
	uint8_t rr1;
	int code;
	int s;

	end = zst->zst_ebuf;
	get = zst->zst_rbget;
	scc = cc = zstty_rbuf_size - zst->zst_rbavail;

	if (cc == zstty_rbuf_size) {
		zst->zst_floods++;
		if (zst->zst_errors++ == 0)
			timeout_add_sec(&zst->zst_diag_ch, 60);
	}

	/* If not yet open, drop the entire buffer content here */
	if (!ISSET(tp->t_state, TS_ISOPEN)) {
		get += cc << 1;
		if (get >= end)
			get -= zstty_rbuf_size << 1;
		cc = 0;
	}

	while (cc) {
		code = get[0];
		rr1 = get[1];
		if (ISSET(rr1, ZSRR1_DO | ZSRR1_FE | ZSRR1_PE)) {
			if (ISSET(rr1, ZSRR1_DO)) {
				zst->zst_overflows++;
				if (zst->zst_errors++ == 0)
					timeout_add_sec(&zst->zst_diag_ch, 60);
			}
			if (ISSET(rr1, ZSRR1_FE))
				SET(code, TTY_FE);
			if (ISSET(rr1, ZSRR1_PE))
				SET(code, TTY_PE);
		}
		if ((*rint)(code, tp) == -1) {
			/*
			 * The line discipline's buffer is out of space.
			 */
			if (!ISSET(zst->zst_rx_flags, RX_TTY_BLOCKED)) {
				/*
				 * We're either not using flow control, or the
				 * line discipline didn't tell us to block for
				 * some reason. Either way, we have no way to
				 * know when there's more space available, so
				 * just drop the rest of the data.
				 */
				get += cc << 1;
				if (get >= end)
					get -= zstty_rbuf_size << 1;
				cc = 0;
			} else {
				/*
				 * Don't schedule any more receive processing
				 * until the line discipline tells us there's
				 * space available (through comhwiflow()).
				 * Leave the rest of the data in the input
				 * buffer.
				 */
				SET(zst->zst_rx_flags, RX_TTY_OVERFLOWED);
			}
			break;
		}
		get += 2;
		if (get >= end)
			get = zst->zst_rbuf;
		cc--;
	}

	if (cc != scc) {
		zst->zst_rbget = get;
		s = splzs();
		cc = zst->zst_rbavail += scc - cc;
		/* Buffers should be ok again, release possible block. */
		if (cc >= zst->zst_r_lowat) {
			if (ISSET(zst->zst_rx_flags, RX_IBUF_OVERFLOWED)) {
				CLR(zst->zst_rx_flags, RX_IBUF_OVERFLOWED);
				SET(cs->cs_preg[1], ZSWR1_RIE);
				cs->cs_creg[1] = cs->cs_preg[1];
				zs_write_reg(cs, 1, cs->cs_creg[1]);
			}
			if (ISSET(zst->zst_rx_flags, RX_IBUF_BLOCKED)) {
				CLR(zst->zst_rx_flags, RX_IBUF_BLOCKED);
				zs_hwiflow(zst);
			}
		}
		splx(s);
	}
}
/* Decode the time string */
void
endrun_decode(struct endrun *np, struct tty *tp, char *fld[], int fldcnt)
{
	int64_t date_nano, time_nano, offset_nano, endrun_now;
	char tfom;
	int jumped = 0;

	if (fldcnt != NUMFLDS) {
		DPRINTF(("endrun: field count mismatch, %d\n", fldcnt));
		return;
	}
	if (endrun_time_to_nano(fld[3], &time_nano) == -1) {
		DPRINTF(("endrun: illegal time, %s\n", fld[3]));
		return;
	}
	if (endrun_date_to_nano(fld[1], fld[2], &date_nano) == -1) {
		DPRINTF(("endrun: illegal date, %s %s\n", fld[1], fld[2]));
		return;
	}
	offset_nano = 0;
	/* only parse offset when timemode is local */
	if (fld[5][0] == 'L' &&
	    endrun_offset_to_nano(fld[4], &offset_nano) == -1) {
		DPRINTF(("endrun: illegal offset, %s\n", fld[4]));
		return;
	}

	endrun_now = date_nano + time_nano + offset_nano;
	if (endrun_now <= np->last) {
		DPRINTF(("endrun: time not monotonically increasing "
		    "last %lld now %lld\n",
		    (long long)np->last, (long long)endrun_now));
		jumped = 1;
	}
	np->last = endrun_now;
	np->gap = 0LL;
#ifdef ENDRUN_DEBUG
	if (np->time.status == SENSOR_S_UNKNOWN) {
		np->time.status = SENSOR_S_OK;
		timeout_add_sec(&np->endrun_tout, TRUSTTIME);
	}
#endif

	np->time.value = np->ts.tv_sec * 1000000000LL +
	    np->ts.tv_nsec - endrun_now;
	np->time.tv.tv_sec = np->ts.tv_sec;
	np->time.tv.tv_usec = np->ts.tv_nsec / 1000L;
	if (np->time.status == SENSOR_S_UNKNOWN) {
		np->time.status = SENSOR_S_OK;
		np->time.flags &= ~SENSOR_FINVALID;
		strlcpy(np->time.desc, "EndRun", sizeof(np->time.desc));
	}

	/*
	 * Only update the timeout if the clock reports the time as valid.
	 *
	 * Time Figure Of Merit (TFOM) values:
	 *
	 * 6 - time error is < 100 us
	 * 7 - time error is < 1 ms
	 * 8 - time error is < 10 ms
	 * 9 - time error is > 10 ms,
	 *     unsynchronized state if never locked to CDMA
	 */
	switch (tfom = fld[0][0]) {
	case '6':
	case '7':
	case '8':
		np->time.status = SENSOR_S_OK;
		np->signal.status = SENSOR_S_OK;
		break;
	case '9':
		np->signal.status = SENSOR_S_WARN;
		break;
	default:
		DPRINTF(("endrun: invalid TFOM: '%c'\n", tfom));
		np->signal.status = SENSOR_S_CRIT;
		break;
	}

#ifdef ENDRUN_DEBUG
	if (np->tfom != tfom) {
		DPRINTF(("endrun: TFOM changed from %c to %c\n",
		    np->tfom, tfom));
		np->tfom = tfom;
	}
#endif

	if (jumped)
		np->time.status = SENSOR_S_WARN;
	if (np->time.status == SENSOR_S_OK)
		timeout_add_sec(&np->endrun_tout, TRUSTTIME);

	/*
	 * If tty timestamping is requested, but no PPS signal is present, set
	 * the sensor state to CRITICAL.
	 */
	if (np->no_pps)
		np->time.status = SENSOR_S_CRIT;
}
/*
 * Loop over a tdb chain, taking into consideration protocol tunneling. The
 * fourth argument is set if the first encapsulation header is already in
 * place.
 */
int
ipsp_process_packet(struct mbuf *m, struct tdb *tdb, int af, int tunalready)
{
	int i, off, error;
	struct mbuf *mp;
#ifdef INET6
	struct ip6_ext ip6e;
	int nxt;
	int dstopt = 0;
#endif

	int setdf = 0;
	struct ip *ip;
#ifdef INET6
	struct ip6_hdr *ip6;
#endif /* INET6 */

#ifdef ENCDEBUG
	char buf[INET6_ADDRSTRLEN];
#endif

	/* Check that the transform is allowed by the administrator. */
	if ((tdb->tdb_sproto == IPPROTO_ESP && !esp_enable) ||
	    (tdb->tdb_sproto == IPPROTO_AH && !ah_enable) ||
	    (tdb->tdb_sproto == IPPROTO_IPCOMP && !ipcomp_enable)) {
		DPRINTF(("ipsp_process_packet(): IPsec outbound packet "
		    "dropped due to policy (check your sysctls)\n"));
		m_freem(m);
		return EHOSTUNREACH;
	}

	/* Sanity check. */
	if (!tdb->tdb_xform) {
		DPRINTF(("ipsp_process_packet(): uninitialized TDB\n"));
		m_freem(m);
		return EHOSTUNREACH;
	}

	/* Check if the SPI is invalid. */
	if (tdb->tdb_flags & TDBF_INVALID) {
		DPRINTF(("ipsp_process_packet(): attempt to use invalid "
		    "SA %s/%08x/%u\n", ipsp_address(&tdb->tdb_dst, buf,
		    sizeof(buf)), ntohl(tdb->tdb_spi), tdb->tdb_sproto));
		m_freem(m);
		return ENXIO;
	}

	/* Check that the network protocol is supported */
	switch (tdb->tdb_dst.sa.sa_family) {
	case AF_INET:
		break;
#ifdef INET6
	case AF_INET6:
		break;
#endif /* INET6 */
	default:
		DPRINTF(("ipsp_process_packet(): attempt to use "
		    "SA %s/%08x/%u for protocol family %d\n",
		    ipsp_address(&tdb->tdb_dst, buf, sizeof(buf)),
		    ntohl(tdb->tdb_spi), tdb->tdb_sproto,
		    tdb->tdb_dst.sa.sa_family));
		m_freem(m);
		return ENXIO;
	}

	/*
	 * Register first use if applicable, setup relevant expiration timer.
	 */
	if (tdb->tdb_first_use == 0) {
		tdb->tdb_first_use = time_second;
		if (tdb->tdb_flags & TDBF_FIRSTUSE)
			timeout_add_sec(&tdb->tdb_first_tmo,
			    tdb->tdb_exp_first_use);
		if (tdb->tdb_flags & TDBF_SOFT_FIRSTUSE)
			timeout_add_sec(&tdb->tdb_sfirst_tmo,
			    tdb->tdb_soft_first_use);
	}

	/*
	 * Check for tunneling if we don't have the first header in place.
	 * When doing Ethernet-over-IP, we are handed an already-encapsulated
	 * frame, so we don't need to re-encapsulate.
	 */
	if (tunalready == 0) {
		/*
		 * If the target protocol family is different, we know we'll be
		 * doing tunneling.
		 */
		if (af == tdb->tdb_dst.sa.sa_family) {
			if (af == AF_INET)
				i = sizeof(struct ip);

#ifdef INET6
			if (af == AF_INET6)
				i = sizeof(struct ip6_hdr);
#endif /* INET6 */

			/* Bring the network header in the first mbuf. */
			if (m->m_len < i) {
				if ((m = m_pullup(m, i)) == NULL)
					return ENOBUFS;
			}

			if (af == AF_INET) {
				ip = mtod(m, struct ip *);

				/*
				 * This is not a bridge packet, remember if we
				 * had IP_DF.
				 */
				setdf = ip->ip_off & htons(IP_DF);
			}

#ifdef INET6
			if (af == AF_INET6)
				ip6 = mtod(m, struct ip6_hdr *);
#endif /* INET6 */
		}

		/* Do the appropriate encapsulation, if necessary. */
		if ((tdb->tdb_dst.sa.sa_family != af) || /* PF mismatch */
		    (tdb->tdb_flags & TDBF_TUNNELING) || /* Tunneling needed */
		    (tdb->tdb_xform->xf_type == XF_IP4) || /* ditto */
		    ((tdb->tdb_dst.sa.sa_family == AF_INET) &&
		     (tdb->tdb_dst.sin.sin_addr.s_addr != INADDR_ANY) &&
		     (tdb->tdb_dst.sin.sin_addr.s_addr != ip->ip_dst.s_addr)) ||
#ifdef INET6
		    ((tdb->tdb_dst.sa.sa_family == AF_INET6) &&
		     (!IN6_IS_ADDR_UNSPECIFIED(&tdb->tdb_dst.sin6.sin6_addr)) &&
		     (!IN6_ARE_ADDR_EQUAL(&tdb->tdb_dst.sin6.sin6_addr,
		      &ip6->ip6_dst))) ||
#endif /* INET6 */
		    0) {
			/* Fix IPv4 header checksum and length. */
			if (af == AF_INET) {
				if (m->m_len < sizeof(struct ip))
					if ((m = m_pullup(m,
					    sizeof(struct ip))) == NULL)
						return ENOBUFS;

				ip = mtod(m, struct ip *);
				ip->ip_len = htons(m->m_pkthdr.len);
				ip->ip_sum = 0;
				ip->ip_sum = in_cksum(m, ip->ip_hl << 2);
			}

#ifdef INET6
			/* Fix IPv6 header payload length. */
			if (af == AF_INET6) {
				if (m->m_len < sizeof(struct ip6_hdr))
					if ((m = m_pullup(m,
					    sizeof(struct ip6_hdr))) == NULL)
						return ENOBUFS;

				if (m->m_pkthdr.len - sizeof(*ip6) >
				    IPV6_MAXPACKET) {
					/* No jumbogram support. */
					m_freem(m);
					return ENXIO;	/*?*/
				}
				ip6 = mtod(m, struct ip6_hdr *);
				ip6->ip6_plen = htons(m->m_pkthdr.len -
				    sizeof(*ip6));
			}
#endif /* INET6 */

			/* Encapsulate -- the last two arguments are unused. */
			error = ipip_output(m, tdb, &mp, 0, 0);
			if ((mp == NULL) && (!error))
				error = EFAULT;
			if (error) {
				m_freem(mp);
				return error;
			}

			m = mp;
			mp = NULL;

			if (tdb->tdb_dst.sa.sa_family == AF_INET && setdf) {
				if (m->m_len < sizeof(struct ip))
					if ((m = m_pullup(m,
					    sizeof(struct ip))) == NULL)
						return ENOBUFS;

				ip = mtod(m, struct ip *);
				ip->ip_off |= htons(IP_DF);
			}

			/* Remember that we appended a tunnel header. */
			tdb->tdb_flags |= TDBF_USEDTUNNEL;
		}

		/* We may be done with this TDB */
		if (tdb->tdb_xform->xf_type == XF_IP4)
			return ipsp_process_done(m, tdb);
	} else {
/*
 * Put command into the device's command queue, return zero or errno.
 */
int
pckbc_enqueue_cmd(pckbc_tag_t self, pckbc_slot_t slot, u_char *cmd,
    int len, int responselen, int sync, u_char *respbuf)
{
	struct pckbc_internal *t = self;
	struct pckbc_slotdata *q = t->t_slotdata[slot];
	struct pckbc_devcmd *nc;
	int s, isactive, res = 0;

	if ((len > 4) || (responselen > 4))
		return (EINVAL);

	s = spltty();
	nc = TAILQ_FIRST(&q->freequeue);
	if (nc) {
		TAILQ_REMOVE(&q->freequeue, nc, next);
	}
	splx(s);
	if (!nc)
		return (ENOMEM);

	bzero(nc, sizeof(*nc));
	memcpy(nc->cmd, cmd, len);
	nc->cmdlen = len;
	nc->responselen = responselen;
	nc->flags = (sync ? KBC_CMDFLAG_SYNC : 0);

	s = spltty();

	if (q->polling && sync) {
		/*
		 * XXX We should poll until the queue is empty.
		 * But we don't come here normally, so make
		 * it simple and throw away everything.
		 */
		pckbc_cleanqueue(q);
	}

	isactive = CMD_IN_QUEUE(q);
	TAILQ_INSERT_TAIL(&q->cmdqueue, nc, next);
	if (!isactive)
		pckbc_start(t, slot);

	if (q->polling)
		res = (sync ? nc->status : 0);
	else if (sync) {
		if ((res = tsleep(nc, 0, "kbccmd", 1*hz))) {
			TAILQ_REMOVE(&q->cmdqueue, nc, next);
			pckbc_cleanup(t);
		} else {
			TAILQ_REMOVE(&q->cmdqueue, nc, next);
			res = nc->status;
		}
	} else
		timeout_add_sec(&t->t_cleanup, 1);

	if (sync) {
		if (respbuf)
			memcpy(respbuf, nc->response, responselen);
		TAILQ_INSERT_TAIL(&q->freequeue, nc, next);
	}

	splx(s);

	return (res);
}
void
ieee80211_create_ibss(struct ieee80211com *ic, struct ieee80211_channel *chan)
{
	struct ieee80211_node *ni;
	struct ifnet *ifp = &ic->ic_if;

	ni = ic->ic_bss;
	if (ifp->if_flags & IFF_DEBUG)
		printf("%s: creating ibss\n", ifp->if_xname);
	ic->ic_flags |= IEEE80211_F_SIBSS;
	ni->ni_chan = chan;
	ni->ni_rates = ic->ic_sup_rates[ieee80211_chan2mode(ic, ni->ni_chan)];
	ni->ni_txrate = 0;
	IEEE80211_ADDR_COPY(ni->ni_macaddr, ic->ic_myaddr);
	IEEE80211_ADDR_COPY(ni->ni_bssid, ic->ic_myaddr);
	if (ic->ic_opmode == IEEE80211_M_IBSS) {
		if ((ic->ic_flags & IEEE80211_F_DESBSSID) != 0)
			IEEE80211_ADDR_COPY(ni->ni_bssid, ic->ic_des_bssid);
		else
			ni->ni_bssid[0] |= 0x02;	/* local bit for IBSS */
	}
	ni->ni_esslen = ic->ic_des_esslen;
	memcpy(ni->ni_essid, ic->ic_des_essid, ni->ni_esslen);
	ni->ni_rssi = 0;
	ni->ni_rstamp = 0;
	memset(ni->ni_tstamp, 0, sizeof(ni->ni_tstamp));
	ni->ni_intval = ic->ic_lintval;
	ni->ni_capinfo = IEEE80211_CAPINFO_IBSS;
	if (ic->ic_flags & IEEE80211_F_WEPON)
		ni->ni_capinfo |= IEEE80211_CAPINFO_PRIVACY;
	if (ic->ic_flags & IEEE80211_F_RSNON) {
		struct ieee80211_key *k;

		/* initialize 256-bit global key counter to a random value */
		arc4random_buf(ic->ic_globalcnt, EAPOL_KEY_NONCE_LEN);

		ni->ni_rsnprotos = ic->ic_rsnprotos;
		ni->ni_rsnakms = ic->ic_rsnakms;
		ni->ni_rsnciphers = ic->ic_rsnciphers;
		ni->ni_rsngroupcipher = ic->ic_rsngroupcipher;
		ni->ni_rsngroupmgmtcipher = ic->ic_rsngroupmgmtcipher;
		ni->ni_rsncaps = 0;
		if (ic->ic_caps & IEEE80211_C_MFP) {
			ni->ni_rsncaps |= IEEE80211_RSNCAP_MFPC;
			if (ic->ic_flags & IEEE80211_F_MFPR)
				ni->ni_rsncaps |= IEEE80211_RSNCAP_MFPR;
		}

		ic->ic_def_txkey = 1;
		k = &ic->ic_nw_keys[ic->ic_def_txkey];
		memset(k, 0, sizeof(*k));
		k->k_id = ic->ic_def_txkey;
		k->k_cipher = ni->ni_rsngroupcipher;
		k->k_flags = IEEE80211_KEY_GROUP | IEEE80211_KEY_TX;
		k->k_len = ieee80211_cipher_keylen(k->k_cipher);
		arc4random_buf(k->k_key, k->k_len);
		(*ic->ic_set_key)(ic, ni, k);	/* XXX */

		if (ic->ic_caps & IEEE80211_C_MFP) {
			ic->ic_igtk_kid = 4;
			k = &ic->ic_nw_keys[ic->ic_igtk_kid];
			memset(k, 0, sizeof(*k));
			k->k_id = ic->ic_igtk_kid;
			k->k_cipher = ni->ni_rsngroupmgmtcipher;
			k->k_flags = IEEE80211_KEY_IGTK | IEEE80211_KEY_TX;
			k->k_len = 16;
			arc4random_buf(k->k_key, k->k_len);
			(*ic->ic_set_key)(ic, ni, k);	/* XXX */
		}
		/*
		 * In HostAP mode, multicast traffic is sent using ic_bss
		 * as the Tx node, so mark our node as valid so we can send
		 * multicast frames using the group key we've just configured.
		 */
		ni->ni_port_valid = 1;
		ni->ni_flags |= IEEE80211_NODE_TXPROT;

		/* schedule a GTK/IGTK rekeying after 3600s */
		timeout_add_sec(&ic->ic_rsn_timeout, 3600);
	}
	timeout_add_sec(&ic->ic_inact_timeout, IEEE80211_INACT_WAIT);
	timeout_add_sec(&ic->ic_node_cache_timeout, IEEE80211_CACHE_WAIT);
	ieee80211_new_state(ic, IEEE80211_S_RUN, -1);
}
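/*
 * The rekeying handler scheduled above is not included in this listing.
 * A minimal sketch, assuming ieee80211_setkeys() is the entry point that
 * rolls the group keys: perform the rekeying at splnet, then re-arm the
 * timeout for another hour.
 */
void
ieee80211_gtk_rekey_timeout(void *arg)
{
	struct ieee80211com *ic = arg;
	int s;

	s = splnet();
	ieee80211_setkeys(ic);	/* assumed rekeying entry point */
	splx(s);

	/* re-schedule a GTK/IGTK rekeying after 3600s */
	timeout_add_sec(&ic->ic_rsn_timeout, 3600);
}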
int
sxiuart_intr(void *arg)
{
	struct sxiuart_softc *sc = arg;
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	struct tty *tp;
	uint32_t cnt;
	uint8_t c, iir, lsr, msr, delta;
	uint8_t *p;

	iir = bus_space_read_1(iot, ioh, SXIUART_IIR);
	if ((iir & IIR_IMASK) == IIR_BUSY) {
		(void)bus_space_read_1(iot, ioh, SXIUART_USR);
		return (0);
	}
	if (ISSET(iir, IIR_NOPEND))
		return (0);

	if (sc->sc_tty == NULL)
		return (0);
	tp = sc->sc_tty;

	cnt = 0;
loop:
	lsr = bus_space_read_1(iot, ioh, SXIUART_LSR);
	if (ISSET(lsr, LSR_RXRDY)) {
		if (cnt == 0) {
			p = sc->sc_ibufp;
			softintr_schedule(sc->sc_si);
		}
		cnt++;

		c = bus_space_read_1(iot, ioh, SXIUART_RBR);
		if (ISSET(lsr, LSR_BI)) {
#if defined(DDB)
			if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
				if (db_console)
					Debugger();
				goto loop;
			}
#endif
			c = 0;
		}
		if (p >= sc->sc_ibufend) {
			sc->sc_floods++;
			if (sc->sc_errors++ == 0)
				timeout_add_sec(&sc->sc_diag_tmo, 60);
		} else {
			*p++ = c;
			*p++ = lsr;
			if (p == sc->sc_ibufhigh &&
			    ISSET(tp->t_cflag, CRTSCTS)) {
				/* XXX */
				CLR(sc->sc_mcr, MCR_RTS);
				bus_space_write_1(iot, ioh, SXIUART_MCR,
				    sc->sc_mcr);
			}
		}
		goto loop;
	} else if (cnt > 0)
		sc->sc_ibufp = p;

	msr = bus_space_read_1(iot, ioh, SXIUART_MSR);
	if (msr != sc->sc_msr) {
		delta = msr ^ sc->sc_msr;

		ttytstamp(tp, sc->sc_msr & MSR_CTS, msr & MSR_CTS,
		    sc->sc_msr & MSR_DCD, msr & MSR_DCD);

		sc->sc_msr = msr;
		if (ISSET(delta, MSR_DCD)) {
			if (!ISSET(sc->sc_swflags, COM_SW_SOFTCAR) &&
			    (*linesw[tp->t_line].l_modem)(tp,
			    ISSET(msr, MSR_DCD)) == 0) {
				CLR(sc->sc_mcr, sc->sc_dtr);
				bus_space_write_1(iot, ioh, SXIUART_MCR,
				    sc->sc_mcr);
			}
		}
		if (ISSET(delta & msr, MSR_CTS) &&
		    ISSET(tp->t_cflag, CRTSCTS))
			(*linesw[tp->t_line].l_start)(tp);
	}

	if (ISSET(tp->t_state, TS_BUSY) && ISSET(lsr, LSR_TXRDY)) {
		CLR(tp->t_state, TS_BUSY | TS_FLUSH);
		if (sc->sc_halt > 0)
			wakeup(&tp->t_outq);
		(*linesw[tp->t_line].l_start)(tp);
	}

	iir = bus_space_read_1(iot, ioh, SXIUART_IIR);
	if (ISSET(iir, IIR_NOPEND))
		goto done;
	cnt = 0;
	goto loop;

done:
	return (1);
}