/* * kmoutput * * Locks: Assumes tp is locked on entry, remains locked on exit * * Notes: Called from kmstart() and kmtimeout(); kmtimeout() is a * timer initiated by this routine to deal with pending * output not yet flushed (output is flushed at a maximum * of sizeof(buf) charatcers at a time before dropping into * the timeout code). */ static int kmoutput(struct tty *tp) { char buf[80]; /* buffer; limits output per call */ char *cp; int cc = -1; /* While there is data available to be output... */ while (tp->t_outq.c_cc > 0) { cc = ndqb(&tp->t_outq, 0); if (cc == 0) break; /* * attempt to output as many characters as are available, * up to the available transfer buffer size. */ cc = min(cc, sizeof buf); /* copy the output queue contents to the buffer */ (void) q_to_b(&tp->t_outq, (unsigned char *)buf, cc); for (cp = buf; cp < &buf[cc]; cp++) { /* output the buffer one charatcer at a time */ kmputc(tp->t_dev, *cp & 0x7f); } } if (tp->t_outq.c_cc > 0) { timeout((timeout_fcn_t)kmtimeout, tp, hz); } tp->t_state &= ~TS_BUSY; (*linesw[tp->t_line].l_start)(tp); return 0; }
/*
 * Retry wait for characters, for read.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 */
boolean_t char_read_done(
	register io_req_t	ior)
{
	register struct tty *tp = (struct tty *)ior->io_dev_ptr;
	register spl_t s = spltty();

	simple_lock(&tp->t_lock);
	if (tp->t_inq.c_cc <= 0 ||
	    (tp->t_state & TS_CARR_ON) == 0) {
		/*
		 * Still nothing to read (or carrier gone): re-queue this
		 * request as a delayed reply and tell the caller we are
		 * not done yet.
		 */
		queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
		simple_unlock(&tp->t_lock);
		splx(s);
		return FALSE;
	}

	/*
	 * Move up to io_count bytes out of the input queue; whatever
	 * q_to_b could not deliver becomes the residual count.
	 */
	ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
						  ior->io_data,
						  (int)ior->io_count);
	if (tp->t_state & TS_RTS_DOWN) {
		/*
		 * Input was flow-controlled; re-assert RTS so the remote
		 * side may resume sending now that we have drained data.
		 */
		(*tp->t_mctl)(tp, TM_RTS, DMBIS);
		tp->t_state &= ~TS_RTS_DOWN;
	}

	simple_unlock(&tp->t_lock);
	splx(s);

	(void) ds_read_done(ior);
	return TRUE;
}
/*
 * Start output on the PROM console tty: pull one burst off the output
 * queue and hand it to the PROM, then reschedule if more is pending.
 */
static void
pconsstart(struct tty *tp)
{
	struct clist *outq;
	uint8_t chunk[OFBURSTLEN];
	int s, n;

	s = spltty();
	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) {
		splx(s);
		return;
	}
	tp->t_state |= TS_BUSY;
	splx(s);

	/* Copy a burst out of the queue and let the PROM print it. */
	outq = &tp->t_outq;
	n = q_to_b(outq, chunk, OFBURSTLEN);
	prom_putstr(chunk, n);

	s = spltty();
	tp->t_state &= ~TS_BUSY;
	if (ttypull(tp)) {
		/* More output pending: come back via the restart callout. */
		tp->t_state |= TS_TIMEOUT;
		callout_schedule(&tp->t_rstrt_ch, 1);
	}
	splx(s);
}
/*
 * Push one buffer-sized burst of tty output to the builtin console,
 * rescheduling via the restart callout when more remains queued.
 */
static void
biconsdev_output(struct tty *tp)
{
	char chunk[OBUFSIZ];
	int s, cnt;

	s = spltty();
	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) {
		splx(s);
		return;
	}
	tp->t_state |= TS_BUSY;
	splx(s);

	cnt = q_to_b(&tp->t_outq, chunk, sizeof(chunk));
	bicons_putn(chunk, cnt);

	s = spltty();
	tp->t_state &= ~TS_BUSY;
	/* Come back if there's more to do */
	if (ttypull(tp)) {
		tp->t_state |= TS_TIMEOUT;
		callout_schedule(&tp->t_rstrt_ch, 1);
	}
	splx(s);
}
static int kmoutput( struct tty *tp) { /* * FIXME - to be grokked...copied from m68k km.c. */ char buf[80]; char *cp; int cc = -1; extern int hz; while (tp->t_outq.c_cc > 0) { cc = ndqb(&tp->t_outq, 0); if (cc == 0) break; cc = min(cc, sizeof buf); (void) q_to_b(&tp->t_outq, buf, cc); for (cp = buf; cp < &buf[cc]; cp++) { kmputc(*cp & 0x7f); } } if (tp->t_outq.c_cc > 0) { timeout((timeout_fcn_t)kmtimeout, tp, hz); } tp->t_state &= ~TS_BUSY; ttwwakeup(tp); return 0; }
static int kmoutput(struct tty *tp) { /* * FIXME - to be grokked...copied from m68k km.c. */ char buf[80]; char *cp; int cc = -1; while (tp->t_outq.c_cc > 0) { cc = ndqb(&tp->t_outq, 0); if (cc == 0) break; cc = min(cc, sizeof buf); (void) q_to_b(&tp->t_outq, (unsigned char *)buf, cc); for (cp = buf; cp < &buf[cc]; cp++) kmputc(tp->t_dev, *cp & 0x7f); } if (tp->t_outq.c_cc > 0) { timeout((timeout_fcn_t)kmtimeout, tp, hz); } tp->t_state &= ~TS_BUSY; (*linesw[tp->t_line].l_start)(tp); return 0; }
/*
 * Read from a uhid device.  In immediate mode (UHID_IMMED) the current
 * input report is fetched directly from the hardware; otherwise the
 * caller blocks until interrupt data arrives on the input queue, and
 * queued chunks are then copied out to the user process.
 */
int
uhid_do_read(struct uhid_softc *sc, struct uio *uio, int flag)
{
	int s;
	int error = 0;
	int extra;		/* 1 if a report-id byte prefixes the data */
	size_t length;
	u_char buffer[UHID_CHUNK];
	usbd_status err;

	DPRINTFN(1, ("uhidread\n"));
	if (sc->sc_state & UHID_IMMED) {
		/* Immediate mode: query the input report synchronously. */
		DPRINTFN(1, ("uhidread immed\n"));
		extra = sc->sc_hdev.sc_report_id != 0;
		err = uhidev_get_report(&sc->sc_hdev, UHID_INPUT_REPORT,
		    sc->sc_hdev.sc_report_id, buffer,
		    sc->sc_hdev.sc_isize + extra);
		if (err)
			return (EIO);
		/* Skip over the report-id byte, if present, on copyout. */
		return (uiomove(buffer+extra, sc->sc_hdev.sc_isize, uio));
	}

	s = splusb();
	/* Block until the interrupt pipe has queued some data. */
	while (sc->sc_q.c_cc == 0) {
		if (flag & IO_NDELAY) {
			splx(s);
			return (EWOULDBLOCK);
		}
		sc->sc_state |= UHID_ASLP;
		DPRINTFN(5, ("uhidread: sleep on %p\n", &sc->sc_q));
		error = tsleep(&sc->sc_q, PZERO | PCATCH, "uhidrea", 0);
		DPRINTFN(5, ("uhidread: woke, error=%d\n", error));
		if (usbd_is_dying(sc->sc_hdev.sc_udev))
			error = EIO;
		if (error) {
			sc->sc_state &= ~UHID_ASLP;
			break;
		}
	}
	splx(s);

	/* Transfer as many chunks as possible. */
	while (sc->sc_q.c_cc > 0 && uio->uio_resid > 0 && !error) {
		length = min(sc->sc_q.c_cc, uio->uio_resid);
		if (length > sizeof(buffer))
			length = sizeof(buffer);

		/* Remove a small chunk from the input queue. */
		(void) q_to_b(&sc->sc_q, buffer, length);
		DPRINTFN(5, ("uhidread: got %lu chars\n", (u_long)length));

		/* Copy the data to the user process. */
		if ((error = uiomove(buffer, length, uio)) != 0)
			break;
	}

	return (error);
}
/*
 * Start (or restart) transmission on a zstty channel.  Pulls a burst of
 * characters from the tty output queue into the softc output buffer and
 * primes the transmitter with the first one; the transmit interrupt
 * path feeds the remainder from sc_oget/sc_ocnt.
 */
static void
zsttystart(struct tty *tp)
{
	struct zstty_softc *sc;
	uint8_t c;

	sc = tp->t_dev->si_drv1;
	if ((tp->t_state & TS_TBLOCK) != 0)
		/* XXX clear RTS */;
	else
		/* XXX set RTS */;
	if ((tp->t_state & (TS_BUSY | TS_TIMEOUT | TS_TTSTOP)) != 0) {
		/* Transmitter busy or output stopped: just wake writers. */
		ttwwakeup(tp);
		return;
	}
	if (tp->t_outq.c_cc <= tp->t_olowat) {
		/* Below low water: wake anyone waiting for queue space. */
		if ((tp->t_state & TS_SO_OLOWAT) != 0) {
			tp->t_state &= ~TS_SO_OLOWAT;
			wakeup(TSA_OLOWAT(tp));
		}
		selwakeup(&tp->t_wsel);
		if (tp->t_outq.c_cc == 0) {
			/* Queue drained: report output complete if wanted. */
			if ((tp->t_state & (TS_BUSY | TS_SO_OCOMPLETE)) ==
			    TS_SO_OCOMPLETE && tp->t_outq.c_cc == 0) {
				tp->t_state &= ~TS_SO_OCOMPLETE;
				wakeup(TSA_OCOMPLETE(tp));
			}
			return;
		}
	}
	sc->sc_ocnt = q_to_b(&tp->t_outq, sc->sc_obuf, sizeof(sc->sc_obuf));
	if (sc->sc_ocnt == 0)
		return;
	/* First byte goes out now; the interrupt handler sends the rest. */
	c = sc->sc_obuf[0];
	sc->sc_oget = sc->sc_obuf + 1;
	sc->sc_ocnt--;
	tp->t_state |= TS_BUSY;
	sc->sc_tx_busy = 1;

	/*
	 * Enable transmit interrupts if necessary and send the first
	 * character to start up the transmitter.
	 */
	if ((sc->sc_preg[1] & ZSWR1_TIE) == 0) {
		sc->sc_preg[1] |= ZSWR1_TIE;
		sc->sc_creg[1] = sc->sc_preg[1];
		ZS_WRITE_REG(sc, 1, sc->sc_creg[1]);
	}
	ZS_WRITE(sc, sc->sc_data, c);

	ttwwakeup(tp);
}
/*
 * Read from TTY.
 * No locks may be held.
 * May run on any CPU - does not talk to device driver.
 */
io_return_t char_read(
	register struct tty	*tp,
	register io_req_t	ior)
{
	spl_t s;
	kern_return_t rc;

	/*
	 * Allocate memory for read buffer.
	 */
	rc = device_read_alloc(ior, (vm_size_t)ior->io_count);
	if (rc != KERN_SUCCESS)
		return rc;

	s = spltty();
	simple_lock(&tp->t_lock);
	if ((tp->t_state & TS_CARR_ON) == 0) {
		if ((tp->t_state & TS_ONDELAY) == 0) {
			/*
			 * No delayed writes - tell caller that device is down
			 */
			rc = D_IO_ERROR;
			goto out;
		}
		if (ior->io_mode & D_NOWAIT) {
			rc = D_WOULD_BLOCK;
			goto out;
		}
	}
	if (tp->t_inq.c_cc <= 0 ||
	    (tp->t_state & TS_CARR_ON) == 0) {
		/*
		 * Nothing buffered yet: park the request on the delayed
		 * read queue; char_read_done() completes it when input
		 * arrives (or carrier returns).
		 */
		ior->io_dev_ptr = (char *)tp;
		queue_delayed_reply(&tp->t_delayed_read, ior, char_read_done);
		rc = D_IO_QUEUED;
		goto out;
	}
	/* Data available now: move it and record any shortfall. */
	ior->io_residual = ior->io_count - q_to_b(&tp->t_inq,
						  ior->io_data,
						  (int)ior->io_count);
	if (tp->t_state & TS_RTS_DOWN) {
		/* Input drained: re-assert RTS to resume inbound flow. */
		(*tp->t_mctl)(tp, TM_RTS, DMBIS);
		tp->t_state &= ~TS_RTS_DOWN;
	}

out:
	simple_unlock(&tp->t_lock);
	splx(s);
	return rc;
}
/*
 * Start output on an i.MX UART: wake writers when the queue is below
 * low water, enable the TX-empty interrupt, and push a small burst of
 * bytes into the transmit data register.
 */
void
imxuart_start(struct tty *tp)
{
	struct imxuart_softc *sc = imxuart_cd.cd_devs[DEVUNIT(tp->t_dev)];
	bus_space_tag_t iot = sc->sc_iot;
	bus_space_handle_t ioh = sc->sc_ioh;
	int s;

	s = spltty();
	if (ISSET(tp->t_state, TS_BUSY)) {
		splx(s);
		return;
	}
	if (ISSET(tp->t_state, TS_TIMEOUT | TS_TTSTOP))
		goto stopped;
#ifdef DAMNFUCKSHIT
	/*
	 * Clear-to-send (i.e. the RTS pin on this part) is not directly
	 * readable - skip the check for now.
	 */
	if (ISSET(tp->t_cflag, CRTSCTS) && !ISSET(sc->sc_msr, IMXUART_CTS))
		goto stopped;
#endif
	if (tp->t_outq.c_cc <= tp->t_lowat) {
		/* Below low water: wake sleepers and poll/select waiters. */
		if (ISSET(tp->t_state, TS_ASLEEP)) {
			CLR(tp->t_state, TS_ASLEEP);
			wakeup(&tp->t_outq);
		}
		if (tp->t_outq.c_cc == 0)
			goto stopped;
		selwakeup(&tp->t_wsel);
	}
	SET(tp->t_state, TS_BUSY);

	/* Ensure the TX-empty interrupt is enabled for the next callback. */
	if (!ISSET(sc->sc_ucr1, IMXUART_CR1_TXMPTYEN)) {
		SET(sc->sc_ucr1, IMXUART_CR1_TXMPTYEN);
		bus_space_write_2(iot, ioh, IMXUART_UCR1, sc->sc_ucr1);
	}

	{
		/* Feed up to 32 bytes straight into the TX data register. */
		u_char buf[32];
		int n = q_to_b(&tp->t_outq, buf, 32/*XXX*/);
		int i;
		for (i = 0; i < n; i++)
			bus_space_write_1(iot, ioh, IMXUART_UTXD, buf[i]);
	}
	splx(s);
	return;
stopped:
	/* Output stopped or queue empty: quiet the TX-empty interrupt. */
	if (ISSET(sc->sc_ucr1, IMXUART_CR1_TXMPTYEN)) {
		CLR(sc->sc_ucr1, IMXUART_CR1_TXMPTYEN);
		bus_space_write_2(iot, ioh, IMXUART_UCR1, sc->sc_ucr1);
	}
	splx(s);
}
/*
 * Start output on a virtio console port: reclaim completed transmit
 * slots, then move the tty output queue contents into the transmit
 * virtqueue in BUFSIZE chunks and notify the host.
 */
void
vioconstart(struct tty *tp)
{
	struct viocon_softc *sc = dev2sc(tp->t_dev);
	struct virtio_softc *vsc;
	struct viocon_port *vp = dev2port(tp->t_dev);
	struct virtqueue *vq;
	u_char *buf;
	int s, cnt, slot, ret, ndone;

	vsc = sc->sc_virtio;
	vq = vp->vp_tx;

	s = spltty();
	/* Harvest buffers the host has finished transmitting. */
	ndone = viocon_tx_drain(vp, vq);
	if (ISSET(tp->t_state, TS_BUSY)) {
		/* Only clear BUSY if the drain actually freed slots. */
		if (ndone > 0)
			CLR(tp->t_state, TS_BUSY);
		else
			goto out;
	}

	if (ISSET(tp->t_state, TS_TIMEOUT | TS_TTSTOP))
		goto out;
	if (tp->t_outq.c_cc == 0)
		goto out;
	ndone = 0;

	while (tp->t_outq.c_cc > 0) {
		ret = virtio_enqueue_prep(vq, &slot);
		if (ret == EAGAIN)
			/* Ring full: set BUSY below and retry later. */
			break;
		KASSERT(ret == 0);
		ret = virtio_enqueue_reserve(vq, slot, 1);
		KASSERT(ret == 0);
		/* Each slot has a dedicated BUFSIZE region in vp_tx_buf. */
		buf = vp->vp_tx_buf + slot * BUFSIZE;
		cnt = q_to_b(&tp->t_outq, buf, BUFSIZE);
		/* DMA map covers rx and tx; offset is relative to rx base. */
		bus_dmamap_sync(vsc->sc_dmat, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt,
		    BUS_DMASYNC_PREWRITE);
		virtio_enqueue_p(vq, slot, vp->vp_dmamap,
		    vp->vp_tx_buf - vp->vp_rx_buf + slot * BUFSIZE, cnt, 1);
		virtio_enqueue_commit(vsc, vq, slot, 0);
		ndone++;
	}

	if (ret == EAGAIN)
		SET(tp->t_state, TS_BUSY);

	if (ndone > 0)
		virtio_notify(vsc, vq);
	ttwakeupwr(tp);
out:
	splx(s);
}
/*
 * cztty_transmit()
 *
 * Look at the tty for this port and start sending: copy from the tty
 * output queue into the board's circular transmit buffer and, if any
 * data moved, publish the new PUT index so the firmware starts sending.
 * Returns non-zero if anything was transferred.
 */
static int
cztty_transmit(struct cztty_softc *sc, struct tty *tp)
{
	struct cz_softc *cz = CZTTY_CZ(sc);
	u_int move, get, put, size, address;
#ifdef HOSTRAMCODE
	int error, done = 0;
#else
	int done = 0;
#endif

	/* Snapshot the firmware's transmit ring state. */
	size = CZTTY_BUF_READ(sc, BUFCTL_TX_BUFSIZE);
	get = CZTTY_BUF_READ(sc, BUFCTL_TX_GET);
	put = CZTTY_BUF_READ(sc, BUFCTL_TX_PUT);
	address = CZTTY_BUF_READ(sc, BUFCTL_TX_BUFADDR);

	/* Copy while we have output and the ring has room. */
	while ((tp->t_outq.c_cc > 0) && ((move = TX_MOVEABLE(get, put, size)))){
#ifdef HOSTRAMCODE
		if (0) {
			move = min(tp->t_outq.c_cc, move);
			error = q_to_b(&tp->t_outq, 0, move);
			if (error != move) {
				printf("%s: channel %d: error moving to "
				    "transmit buf\n", device_xname(cz->cz_dev),
				    sc->sc_channel);
				move = error;
			}
		} else {
#endif
			/* Copy straight from the clist's first segment. */
			move = min(ndqb(&tp->t_outq, 0), move);
			bus_space_write_region_1(cz->cz_win_st, cz->cz_win_sh,
			    address + put, tp->t_outq.c_cf, move);
			ndflush(&tp->t_outq, move);
#ifdef HOSTRAMCODE
		}
#endif
		put = ((put + move) % size);
		done = 1;
	}
	if (done) {
		/* Tell the firmware about the data we just deposited. */
		CZTTY_BUF_WRITE(sc, BUFCTL_TX_PUT, put);
	}
	return (done);
}
/* * kmoutput * * Locks: Assumes tp is locked on entry, remains locked on exit * * Notes: Called from kmstart() and kmtimeout(); kmtimeout() is a * timer initiated by this routine to deal with pending * output not yet flushed (output is flushed at a maximum * of sizeof(buf) charatcers at a time before dropping into * the timeout code). */ static int kmoutput(struct tty *tp) { unsigned char buf[80]; /* buffer; limits output per call */ unsigned char *cp; int cc = -1; /* * While there is data available to be output... */ while (tp->t_outq.c_cc > 0) { cc = ndqb(&tp->t_outq, 0); if (cc == 0) break; /* * attempt to output as many characters as are available, * up to the available transfer buffer size. */ cc = min(cc, sizeof(buf)); /* * copy the output queue contents to the buffer */ (void) q_to_b(&tp->t_outq, buf, cc); for (cp = buf; cp < &buf[cc]; cp++) { /* * output the buffer one charatcer at a time */ kmputc(tp->t_dev, *cp & 0x7f); } } /* * XXX This is likely not necessary, as the tty output queue is not * XXX writeable while we hold the tty_lock(). */ if (tp->t_outq.c_cc > 0) { timeout(kmtimeout, tp, hz); } tp->t_state &= ~TS_BUSY; /* * Start the output processing for the line discipline */ (*linesw[tp->t_line].l_start) (tp); return 0; }
/*
 * Put text on the screen using the PROM monitor.
 * This can take a while, so to avoid missing
 * interrupts, this is called at splsoftclock.
 */
static void
kd_putfb(struct tty *tp)
{
	char chunk[PUT_WSIZE];
	struct clist *outq = &tp->t_outq;
	char *p;
	int n;

	/* Drain the queue in chunks of at most PUT_WSIZE-1 bytes. */
	while ((n = q_to_b(outq, chunk, PUT_WSIZE-1)) > 0) {
		/* PROM will barf if high bits are set. */
		for (p = chunk; p < chunk + n; p++)
			*p &= 0x7f;
		/* Now let the PROM print it. */
		prom_putstr(chunk, n);
	}
}
void itestart(struct tty *tp) { struct clist *rbp; struct ite_softc *ip; u_char buf[ITEBURST]; int s, len; ip = getitesp(tp->t_dev); KDASSERT(tp); s = spltty(); { if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP)) goto out; tp->t_state |= TS_BUSY; rbp = &tp->t_outq; len = q_to_b(rbp, buf, ITEBURST); } splx(s); /* Here is a really good place to implement pre/jumpscroll() */ ite_putstr(buf, len, tp->t_dev); s = spltty(); { tp->t_state &= ~TS_BUSY; /* we have characters remaining. */ if (ttypull(tp)) { tp->t_state |= TS_TIMEOUT; callout_schedule(&tp->t_rstrt_ch, 1); } } out: splx(s); }
/*
 * Nudge the transmitter...
 *
 * XXX: I inherited some funny code here.  It implies the host card only
 * interrupts when the transmit buffer reaches the low-water-mark, and does
 * not interrupt when it's actually hits empty.  In some cases, we have
 * processes waiting for complete drain, and we need to simulate an interrupt
 * about when we think the buffer is going to be empty (and retry if not).
 * I really am not certain about this...  I *need* the hardware manuals.
 */
static void
si_start(struct tty *tp)
{
	struct si_port *pp;
	volatile struct si_channel *ccbp;
	struct clist *qp;
	BYTE ipos;
	int nchar;
	int oldspl, count, n, amount, buffer_full;

	oldspl = spltty();

	qp = &tp->t_outq;
	pp = tp->t_sc;

	DPRINT((pp, DBG_ENTRY|DBG_START,
		"si_start(%x) t_state %x sp_state %x t_outq.c_cc %d\n",
		tp, tp->t_state, pp->sp_state, qp->c_cc));

	if (tp->t_state & (TS_TIMEOUT|TS_TTSTOP))
		goto out;

	buffer_full = 0;
	ccbp = pp->sp_ccb;

	/* Bytes currently in flight in the card's circular buffer. */
	count = (int)ccbp->hi_txipos - (int)ccbp->hi_txopos;
	DPRINT((pp, DBG_START, "count %d\n", (BYTE)count));

	while ((nchar = qp->c_cc) > 0) {
		/* Card buffer holds at most 255 in-flight bytes. */
		if ((BYTE)count >= 255) {
			buffer_full++;
			break;
		}
		amount = min(nchar, (255 - (BYTE)count));
		ipos = (unsigned int)ccbp->hi_txipos;
		n = q_to_b(&tp->t_outq, si_txbuf, amount);
		/* will it fit in one lump? */
		if ((SI_BUFFERSIZE - ipos) >= n) {
			si_bcopyv(si_txbuf, &ccbp->hi_txbuf[ipos], n);
		} else {
			/* Wraps: copy the tail, then the head. */
			si_bcopyv(si_txbuf, &ccbp->hi_txbuf[ipos],
				SI_BUFFERSIZE - ipos);
			si_bcopyv(si_txbuf + (SI_BUFFERSIZE - ipos),
				&ccbp->hi_txbuf[0], n - (SI_BUFFERSIZE - ipos));
		}
		ccbp->hi_txipos += n;
		count = (int)ccbp->hi_txipos - (int)ccbp->hi_txopos;
	}

	if (count != 0 && nchar == 0) {
		tp->t_state |= TS_BUSY;
	} else {
		tp->t_state &= ~TS_BUSY;
	}

	/* wakeup time? */
	ttwwakeup(tp);

	DPRINT((pp, DBG_START, "count %d, nchar %d, tp->t_state 0x%x\n",
		(BYTE)count, nchar, tp->t_state));

	if (tp->t_state & TS_BUSY) {
		int time;

		/*
		 * Estimate how long until the card's buffer drains, based
		 * on per-character output time for the current speed, and
		 * arm si_lstart() to simulate the missing empty interrupt.
		 */
		time = ttspeedtab(tp->t_ospeed, chartimes);

		if (time > 0) {
			if (time < nchar)
				time = nchar / time;
			else
				time = 2;
		} else {
			DPRINT((pp, DBG_START,
				"bad char time value! %d\n", time));
			time = hz/10;
		}

		if ((pp->sp_state & (SS_LSTART|SS_INLSTART)) == SS_LSTART) {
			untimeout(si_lstart, (caddr_t)pp, pp->lstart_ch);
		} else {
			pp->sp_state |= SS_LSTART;
		}
		DPRINT((pp, DBG_START, "arming lstart, time=%d\n", time));
		pp->lstart_ch = timeout(si_lstart, (caddr_t)pp, time);
	}

out:
	splx(oldspl);
	DPRINT((pp, DBG_EXIT|DBG_START, "leave si_start()\n"));
}
/*
 * Read from the pty controller (master) side: deliver pending
 * packet-mode / user-control status bytes first, otherwise block until
 * the slave's output queue has data, then copy it to the caller.
 */
int
ptcread(dev_t dev, struct uio *uio, int flag)
{
	struct pt_softc *pti = pt_softc[minor(dev)];
	struct tty *tp = pti->pt_tty;
	char buf[BUFSIZ];
	int error = 0, cc, bufcc = 0;

	/*
	 * We want to block until the slave
	 * is open, and there's something to read;
	 * but if we lost the slave or we're NBIO,
	 * then return the appropriate error instead.
	 */
	for (;;) {
		if (tp->t_state&TS_ISOPEN) {
			if (pti->pt_flags&PF_PKT && pti->pt_send) {
				/* Packet mode: status byte goes out first. */
				error = ureadc((int)pti->pt_send, uio);
				if (error)
					return (error);
				if (pti->pt_send & TIOCPKT_IOCTL) {
					/* Also deliver the termios state. */
					cc = MIN(uio->uio_resid,
						sizeof(tp->t_termios));
					uiomove(&tp->t_termios, cc, uio);
				}
				pti->pt_send = 0;
				return (0);
			}
			if (pti->pt_flags&PF_UCNTL && pti->pt_ucntl) {
				error = ureadc((int)pti->pt_ucntl, uio);
				if (error)
					return (error);
				pti->pt_ucntl = 0;
				return (0);
			}
			if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0)
				break;
		}
		if ((tp->t_state&TS_CARR_ON) == 0)
			return (0);	/* EOF */
		if (flag & IO_NDELAY)
			return (EWOULDBLOCK);
		error = tsleep(&tp->t_outq.c_cf, TTIPRI | PCATCH, ttyin, 0);
		if (error)
			return (error);
	}

	/* Packet/user-control modes prefix real data with a zero byte. */
	if (pti->pt_flags & (PF_PKT|PF_UCNTL))
		error = ureadc(0, uio);
	while (uio->uio_resid > 0 && error == 0) {
		cc = MIN(uio->uio_resid, BUFSIZ);
		cc = q_to_b(&tp->t_outq, buf, cc);
		/* Track the high-water mark so we can scrub the buffer. */
		if (cc > bufcc)
			bufcc = cc;
		if (cc <= 0)
			break;
		error = uiomove(buf, cc, uio);
	}
	/* Wake slave-side writers once the queue drains below low water. */
	if (tp->t_outq.c_cc <= tp->t_lowat) {
		if (tp->t_state&TS_ASLEEP) {
			tp->t_state &= ~TS_ASLEEP;
			wakeup(&tp->t_outq);
		}
		selwakeup(&tp->t_wsel);
	}
	/* Zero the stack buffer so tty data does not linger. */
	if (bufcc)
		bzero(buf, bufcc);
	return (error);
}
/*
 * Read from the pty controller (master) side, DragonFly variant:
 * serialized by tty_token.  Deliver pending packet-mode / user-control
 * status bytes first, otherwise block until the slave's output queue
 * has data, then copy it to the caller.
 */
static int
ptcread(struct dev_read_args *ap)
{
	cdev_t dev = ap->a_head.a_dev;
	struct tty *tp = dev->si_tty;
	struct pt_ioctl *pti = dev->si_drv1;
	char buf[BUFSIZ];
	int error = 0, cc;

	lwkt_gettoken(&tty_token);
	/*
	 * We want to block until the slave
	 * is open, and there's something to read;
	 * but if we lost the slave or we're NBIO,
	 * then return the appropriate error instead.
	 */
	for (;;) {
		if (tp->t_state&TS_ISOPEN) {
			if ((pti->pt_flags & PF_PKT) && pti->pt_send) {
				/* Packet mode: status byte goes out first. */
				error = ureadc((int)pti->pt_send, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				if (pti->pt_send & TIOCPKT_IOCTL) {
					/* Also deliver the termios state. */
					cc = (int)szmin(ap->a_uio->uio_resid,
							sizeof(tp->t_termios));
					uiomove((caddr_t)&tp->t_termios,
						cc, ap->a_uio);
				}
				pti->pt_send = 0;
				lwkt_reltoken(&tty_token);
				return (0);
			}
			if ((pti->pt_flags & PF_UCNTL) && pti->pt_ucntl) {
				error = ureadc((int)pti->pt_ucntl, ap->a_uio);
				if (error) {
					lwkt_reltoken(&tty_token);
					return (error);
				}
				pti->pt_ucntl = 0;
				lwkt_reltoken(&tty_token);
				return (0);
			}
			if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0)
				break;
		}
		if ((tp->t_state & TS_CONNECTED) == 0) {
			lwkt_reltoken(&tty_token);
			return (0);	/* EOF */
		}
		if (ap->a_ioflag & IO_NDELAY) {
			lwkt_reltoken(&tty_token);
			return (EWOULDBLOCK);
		}
		error = tsleep(TSA_PTC_READ(tp), PCATCH, "ptcin", 0);
		if (error) {
			lwkt_reltoken(&tty_token);
			return (error);
		}
	}

	/* Packet/user-control modes prefix real data with a zero byte. */
	if (pti->pt_flags & (PF_PKT|PF_UCNTL))
		error = ureadc(0, ap->a_uio);
	while (ap->a_uio->uio_resid > 0 && error == 0) {
		cc = q_to_b(&tp->t_outq, buf,
			    (int)szmin(ap->a_uio->uio_resid, BUFSIZ));
		if (cc <= 0)
			break;
		error = uiomove(buf, (size_t)cc, ap->a_uio);
	}
	ttwwakeup(tp);
	lwkt_reltoken(&tty_token);
	return (error);
}
/*
 * Read from a ugen endpoint.  Behavior depends on the endpoint's
 * transfer type: interrupt endpoints drain a clist filled by the
 * interrupt pipe, bulk endpoints run synchronous transfers directly,
 * and isochronous endpoints consume a ring buffer filled elsewhere.
 */
int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	char buf[UGEN_BBSIZE];		/* bulk transfer buffer */
	struct usbd_xfer *xfer;
	usbd_status err;
	int s;
	int flags, error = 0;
	u_char buffer[UGEN_CHUNK];	/* interrupt-queue chunk buffer */

	DPRINTFN(5, ("%s: ugenread: %d\n", sc->sc_dev.dv_xname, endpt));

	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    (sce->timeout * hz) / 1000);
			sce->state &= ~UGEN_ASLP;
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (usbd_is_dying(sc->sc_udev))
				error = EIO;
			if (error == EWOULDBLOCK) {	/* timeout, return 0 */
				error = 0;
				break;
			}
			if (error)
				break;
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(buffer))
				n = sizeof(buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		/* Synchronous bulk transfers directly into the uio. */
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		flags = USBD_SYNCHRONOUS;
		if (sce->state & UGEN_SHORT_OK)
			flags |= USBD_SHORT_XFER_OK;
		if (sce->timeout == 0)
			flags |= USBD_CATCH;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			usbd_get_xfer_status(xfer, NULL, NULL, &tn, NULL);
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomove(buf, tn, uio);
			/* Short transfer ends the read. */
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		/* Block until the isoc ring has data (cur != fill). */
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    (sce->timeout * hz) / 1000);
			sce->state &= ~UGEN_ASLP;
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (usbd_is_dying(sc->sc_udev))
				error = EIO;
			if (error == EWOULDBLOCK) {	/* timeout, return 0 */
				error = 0;
				break;
			}
			if (error)
				break;
		}

		/* Copy out contiguous runs, wrapping at the ring limit. */
		while (sce->cur != sce->fill && uio->uio_resid > 0 && !error) {
			if(sce->fill > sce->cur)
				n = min(sce->fill - sce->cur, uio->uio_resid);
			else
				n = min(sce->limit - sce->cur, uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomove(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if(sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
/*
 * Start output on the Xen console.  In dom0, output is pushed through
 * the CONSOLEIO_write hypercall; in a domU it is copied into the shared
 * console ring and the backend is notified via the event channel.
 */
void
xencons_start(struct tty *tp)
{
	struct clist *cl;
	int s;

	s = spltty();
	if (tp->t_state & (TS_TIMEOUT | TS_BUSY | TS_TTSTOP))
		goto out;
	tp->t_state |= TS_BUSY;
	splx(s);

	/*
	 * We need to do this outside spl since it could be fairly
	 * expensive and we don't want our serial ports to overflow.
	 */
	cl = &tp->t_outq;
	if (xendomain_is_dom0()) {
		/* dom0: write directly via the console-I/O hypercall. */
		int len, r;
		u_char buf[XENCONS_BURST+1];

		len = q_to_b(cl, buf, XENCONS_BURST);
		while (len > 0) {
			r = HYPERVISOR_console_io(CONSOLEIO_write, len, buf);
			if (r <= 0)
				break;
			len -= r;
		}
	} else {
		/* domU: copy into the shared ring and kick the backend. */
		XENCONS_RING_IDX cons, prod, len;

#define XNC_OUT (xencons_interface->out)
		cons = xencons_interface->out_cons;
		prod = xencons_interface->out_prod;
		xen_rmb();
		/* Fill until the ring is full, honoring index wrap-around. */
		while (prod != cons + sizeof(xencons_interface->out)) {
			if (MASK_XENCONS_IDX(prod, XNC_OUT) <
			    MASK_XENCONS_IDX(cons, XNC_OUT)) {
				len = MASK_XENCONS_IDX(cons, XNC_OUT) -
				    MASK_XENCONS_IDX(prod, XNC_OUT);
			} else {
				len = sizeof(XNC_OUT) -
				    MASK_XENCONS_IDX(prod, XNC_OUT);
			}
			len = q_to_b(cl, __UNVOLATILE(
			    &XNC_OUT[MASK_XENCONS_IDX(prod, XNC_OUT)]), len);
			if (len == 0)
				break;
			prod = prod + len;
		}
		/* Publish data before the producer index, then notify. */
		xen_wmb();
		xencons_interface->out_prod = prod;
		xen_wmb();
		hypervisor_notify_via_evtchn(
		    xen_start_info.console.domU.evtchn);
#undef XNC_OUT
	}

	s = spltty();
	tp->t_state &= ~TS_BUSY;
	if (ttypull(tp)) {
		/* More output pending: come back via the restart callout. */
		tp->t_state |= TS_TIMEOUT;
		callout_schedule(&tp->t_rstrt_ch, 1);
	}
out:
	splx(s);
}