/*
 * Open a stty serial port on a spif multiport card.  On first open the
 * port gets default termios state (plus sticky TIOCSFLAGS bits), the
 * CD180 channel is reset and receive interrupts are enabled; the open
 * then optionally blocks for carrier before calling the line discipline.
 */
int
stty_open(dev_t dev, int flags, int mode, struct lwp *l)
{
	struct spif_softc *csc;
	struct stty_softc *sc;
	struct stty_port *sp;
	struct tty *tp;
	int card = SPIF_CARD(dev);
	int port = SPIF_PORT(dev);

	/* Both the stty child and its parent spif device must exist. */
	sc = device_lookup_private(&stty_cd, card);
	csc = device_lookup_private(&spif_cd, card);
	if (sc == NULL || csc == NULL)
		return (ENXIO);

	if (port >= sc->sc_nports)
		return (ENXIO);

	sp = &sc->sc_port[port];
	tp = sp->sp_tty;
	tp->t_dev = dev;

	/* Enforce tty open permission/exclusive-use policy. */
	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	mutex_spin_enter(&tty_lock);
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * First open: set default termios modes and fold in the
		 * sticky bits recorded via TIOCSFLAGS (sp_openflags).
		 */
		ttychars(tp);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_cflag = TTYDEF_CFLAG;
		if (ISSET(sp->sp_openflags, TIOCFLAG_CLOCAL))
			SET(tp->t_cflag, CLOCAL);
		if (ISSET(sp->sp_openflags, TIOCFLAG_CRTSCTS))
			SET(tp->t_cflag, CRTSCTS);
		if (ISSET(sp->sp_openflags, TIOCFLAG_MDMBUF))
			SET(tp->t_cflag, MDMBUF);
		tp->t_lflag = TTYDEF_LFLAG;
		tp->t_ispeed = tp->t_ospeed = TTYDEF_SPEED;

		/* Reset the software receive ring to empty. */
		sp->sp_rput = sp->sp_rget = sp->sp_rbuf;

		/*
		 * Select the channel in the CD180 CAR register and reset
		 * it; CAR is re-written because the reset command may
		 * alter the current channel selection.
		 */
		STC_WRITE(csc, STC_CAR, sp->sp_channel);
		stty_write_ccr(csc, CD180_CCR_CMD_RESET|CD180_CCR_RESETCHAN);
		STC_WRITE(csc, STC_CAR, sp->sp_channel);

		stty_param(tp, &tp->t_termios);
		ttsetwater(tp);

		/* Enable carrier-detect and receive-data interrupts. */
		STC_WRITE(csc, STC_SRER, CD180_SRER_CD | CD180_SRER_RXD);

		if (ISSET(sp->sp_openflags, TIOCFLAG_SOFTCAR) ||
		    sp->sp_carrier)
			SET(tp->t_state, TS_CARR_ON);
		else
			CLR(tp->t_state, TS_CARR_ON);
	}

	if (!ISSET(flags, O_NONBLOCK)) {
		/* Blocking open: wait for carrier unless CLOCAL is set. */
		while (!ISSET(tp->t_cflag, CLOCAL) &&
		    !ISSET(tp->t_state, TS_CARR_ON)) {
			int error;

			error = ttysleep(tp, &tp->t_rawcv, true, 0);
			if (error != 0) {
				mutex_spin_exit(&tty_lock);
				return (error);
			}
		}
	}
	mutex_spin_exit(&tty_lock);

	return ((*tp->t_linesw->l_open)(dev, tp));
}
/*
 * Open the AT91 USART serial device.  On first open the hardware is
 * (re)enabled, the PDC DMA fifos are reset, receive interrupts and DMA
 * are turned on, and default termios state is established.  The actual
 * carrier/O_NONBLOCK handling is delegated to ttyopen() and the line
 * discipline open.
 */
int
at91usart_open(dev_t dev, int flag, int mode, struct lwp *l)
{
	struct at91usart_softc *sc;
	struct tty *tp;
	int s;
	int error;

	sc = device_lookup_private(&at91usart_cd, COMUNIT(dev));
	if (sc == NULL || !ISSET(sc->sc_hwflags, COM_HW_DEV_OK))
		return (ENXIO);

	if (!device_is_active(sc->sc_dev))
		return (ENXIO);

#ifdef KGDB
	/*
	 * If this is the kgdb port, no other use is permitted.
	 */
	if (ISSET(sc->sc_hwflags, COM_HW_KGDB))
		return (EBUSY);
#endif

	tp = sc->sc_tty;

	/* Enforce tty open permission/exclusive-use policy. */
	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	s = spltty();

	/*
	 * Do the following iff this is a first open.
	 */
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		struct termios t;

		tp->t_dev = dev;

		/* Power up / enable the device if the bus supports it. */
		if (sc->enable) {
			if ((*sc->enable)(sc)) {
				splx(s);
				printf("%s: device enable failed\n",
				    device_xname(sc->sc_dev));
				return (EIO);
			}
			sc->enabled = 1;
#if 0
/* XXXXXXXXXXXXXXX */
			com_config(sc);
#endif
		}

		/* reset fifos: */
		AT91PDC_RESET_FIFO(sc->sc_iot, sc->sc_ioh, sc->sc_dmat,
		    US_PDC, &sc->sc_rx_fifo, 0);
		AT91PDC_RESET_FIFO(sc->sc_iot, sc->sc_ioh, sc->sc_dmat,
		    US_PDC, &sc->sc_tx_fifo, 1);

		/* reset receive */
		at91usart_writereg(sc, US_CR, US_CR_RSTSTA | US_CR_STTTO);

		/* Turn on interrupts. */
		sc->sc_ier = US_CSR_ENDRX|US_CSR_RXBUFF|US_CSR_TIMEOUT|
		    US_CSR_RXBRK;
		at91usart_writereg(sc, US_IER, sc->sc_ier);

		/* enable DMA: */
		at91usart_writereg(sc, US_PDC + PDC_PTCR, PDC_PTCR_RXTEN);

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		t.c_ispeed = 0;
/*		if (ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) {
			t.c_ospeed = usart_cn_sc.sc_ospeed;
			t.c_cflag = usart_cn_sc.sc_cflag;
		} else*/ {
			t.c_ospeed = TTYDEF_SPEED;
			t.c_cflag = TTYDEF_CFLAG;
		}
		if (ISSET(sc->sc_swflags, TIOCFLAG_CLOCAL))
			SET(t.c_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS))
			SET(t.c_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, TIOCFLAG_MDMBUF))
			SET(t.c_cflag, MDMBUF);

		/*
		 * Make sure at91usart_param() will do something: a zero
		 * t_ospeed cannot match t.c_ospeed, so the speed-unchanged
		 * shortcut in the param routine is defeated.
		 */
		tp->t_ospeed = 0;
		(void) at91usart_param(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/* and unblock. */
		CLR(sc->sc_rx_flags, RX_ANY_BLOCK);

#ifdef COM_DEBUG
		if (at91usart_debug)
			comstatus(sc, "at91usart_open ");
#endif
	}
	splx(s);

	error = ttyopen(tp, COMDIALOUT(dev), ISSET(flag, O_NONBLOCK));
	if (error)
		goto bad;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return (0);

bad:
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		at91usart_shutdown(sc);
	}

	return (error);
}
/*
 * Multiple window output handler.
 * The idea is to copy window outputs to the terminal, via the
 * display package.  We try to give wwcurwin highest priority.
 * The only return conditions are when there is keyboard input
 * and when a child process dies.
 * When there's nothing to do, we sleep in a select().
 * The history of this routine is interesting.
 *
 * NOTE(review): the select() mentioned above is nowadays a poll();
 * the pollfd array is grown on demand and kept across calls.
 */
void
wwiomux(void)
{
	struct ww *w;
	nfds_t nfd;
	int i;
	int volatile dostdin; /* avoid longjmp clobbering */
	char volatile c; /* avoid longjmp clobbering */
	char *p;
	int millis;
	char noblock = 0;
	static struct pollfd *pfd = NULL;
	static nfds_t maxfds = 0;

	c = 0; /* XXXGCC -Wuninitialized */

	for (;;) {
		if (wwinterrupt()) {
			wwclrintr();
			return;
		}

		/* Count ptys with room left in their output buffers. */
		nfd = 0;
		for (w = wwhead.ww_forw; w != &wwhead; w = w->ww_forw) {
			if (w->ww_pty < 0 || w->ww_obq >= w->ww_obe)
				continue;
			nfd++;
		}

		if (maxfds <= ++nfd) { /* One more for the fd=0 case below */
			struct pollfd *npfd = pfd == NULL ?
			    malloc(sizeof(*pfd) * nfd) :
			    realloc(pfd, sizeof(*pfd) * nfd);
			if (npfd == NULL) {
				warn("will retry");
				if (pfd)
					free(pfd);
				pfd = NULL;
				maxfds = 0;
				return;
			}
			pfd = npfd;
			maxfds = nfd;
		}

		/*
		 * Build the pollfd array; note whether any window already
		 * has pending output so we poll without blocking.
		 */
		nfd = 0;
		for (w = wwhead.ww_forw; w != &wwhead; w = w->ww_forw) {
			if (w->ww_pty < 0)
				continue;
			if (w->ww_obq < w->ww_obe) {
				pfd[nfd].fd = w->ww_pty;
				pfd[nfd++].events = POLLIN;
			}
			if (w->ww_obq > w->ww_obp &&
			    !ISSET(w->ww_pflags, WWP_STOPPED))
				noblock = 1;
		}
		/* Watch stdin only if the input buffer has room. */
		if (wwibq < wwibe) {
			dostdin = nfd;
			pfd[nfd].fd = 0;
			pfd[nfd++].events = POLLIN;
		} else {
			dostdin = -1;
		}

		if (!noblock) {
			/* Nothing pending: refresh the screen, then allow
			 * a long poll; wwjmpbuf lets a signal handler
			 * longjmp us back out of the blocking region. */
			if (wwcurwin != 0)
				wwcurtowin(wwcurwin);
			wwupdate();
			wwflush();
			(void) setjmp(wwjmpbuf);
			wwsetjmp = 1;
			if (wwinterrupt()) {
				wwsetjmp = 0;
				wwclrintr();
				return;
			}
			/* XXXX */
			millis = 30000;
		} else {
			millis = 10;
		}
		wwnselect++;
		i = poll(pfd, nfd, millis);
		wwsetjmp = 0;
		noblock = 0;

		if (i < 0)
			wwnselecte++;
		else if (i == 0)
			wwnselectz++;
		else {
			if (dostdin != -1 &&
			    (pfd[dostdin].revents & POLLIN) != 0)
				wwrint();

			/*
			 * Drain readable ptys.  pfd entries were filled in
			 * the same list order, so walk both in lockstep.
			 */
			nfd = 0;
			for (w = wwhead.ww_forw; w != &wwhead;
			    w = w->ww_forw) {
				int n;

				if (w->ww_pty < 0)
					continue;
				if (w->ww_pty != pfd[nfd].fd)
					continue;
				if ((pfd[nfd++].revents & POLLIN) == 0)
					continue;
				wwnwread++;
				p = w->ww_obq;
				if (w->ww_type == WWT_PTY) {
					/*
					 * Packet mode: reserve one byte in
					 * front of the data for the
					 * TIOCPKT status byte, saving the
					 * byte we overwrite in 'c'.
					 */
					if (p == w->ww_ob) {
						w->ww_obp++;
						w->ww_obq++;
					} else
						p--;
					c = *p;
				}
				n = read(w->ww_pty, p, w->ww_obe - p);
				if (n < 0) {
					wwnwreade++;
					(void) close(w->ww_pty);
					w->ww_pty = -1;
				} else if (n == 0) {
					wwnwreadz++;
					(void) close(w->ww_pty);
					w->ww_pty = -1;
				} else if (w->ww_type != WWT_PTY) {
					wwnwreadd++;
					wwnwreadc += n;
					w->ww_obq += n;
				} else if (*p == TIOCPKT_DATA) {
					n--;
					wwnwreadd++;
					wwnwreadc += n;
					w->ww_obq += n;
				} else {
					/* Control packet: honor flow
					 * control and flush requests. */
					wwnwreadp++;
					if (*p & TIOCPKT_STOP)
						SET(w->ww_pflags,
						    WWP_STOPPED);
					if (*p & TIOCPKT_START)
						CLR(w->ww_pflags,
						    WWP_STOPPED);
					if (*p & TIOCPKT_FLUSHWRITE) {
						CLR(w->ww_pflags,
						    WWP_STOPPED);
						w->ww_obq = w->ww_obp =
						    w->ww_ob;
					}
				}
				if (w->ww_type == WWT_PTY)
					*p = c; /* restore saved byte */
			}
		}
		/*
		 * Try the current window first, if there is output
		 * then process it and go back to the top to try again.
		 * This can lead to starvation of the other windows,
		 * but presumably that what we want.
		 * Update will eventually happen when output from wwcurwin
		 * dies down.
		 */
		if ((w = wwcurwin) != NULL && w->ww_pty >= 0 &&
		    w->ww_obq > w->ww_obp &&
		    !ISSET(w->ww_pflags, WWP_STOPPED)) {
			int n = wwwrite(w, w->ww_obp,
			    w->ww_obq - w->ww_obp);
			if ((w->ww_obp += n) == w->ww_obq)
				w->ww_obq = w->ww_obp = w->ww_ob;
			noblock = 1;
			continue;
		}
		for (w = wwhead.ww_forw; w != &wwhead; w = w->ww_forw)
			if (w->ww_pty >= 0 && w->ww_obq > w->ww_obp &&
			    !ISSET(w->ww_pflags, WWP_STOPPED)) {
				int n = wwwrite(w, w->ww_obp,
				    w->ww_obq - w->ww_obp);
				if ((w->ww_obp += n) == w->ww_obq)
					w->ww_obq = w->ww_obp = w->ww_ob;
				if (wwinterrupt())
					break;
			}
	}
}
static int vnop_open_9p(struct vnop_open_args *ap) { openfid_9p *op; node_9p *np; fid_9p fid; qid_9p qid; uint32_t iounit; int e, flags, mode; TRACE(); flags = 0; if (ap->a_mode) flags = OFLAGS(ap->a_mode); mode = flags & O_ACCMODE; CLR(flags, O_ACCMODE); CLR(flags, O_DIRECTORY|O_NONBLOCK|O_NOFOLLOW); CLR(flags, O_APPEND); /* locks implemented on the vfs layer */ CLR(flags, O_EXLOCK|O_SHLOCK); if (ISSET(flags, O_TRUNC)) { SET(mode, OTRUNC); CLR(flags, O_TRUNC); } if (ISSET(flags, O_CLOEXEC)) { SET(mode, OCEXEC); CLR(flags, O_CLOEXEC); } if (ISSET(flags, O_EXCL)) { SET(mode, OEXCL); CLR(flags, O_EXCL); } /* vnop_creat just called */ CLR(flags, O_CREAT); if (ISSET(flags, O_EVTONLY)) CLR(flags, O_EVTONLY); if (ISSET(flags, FNOCACHE)) CLR(flags, FNOCACHE); if (ISSET(flags, FNORDAHEAD)) CLR(flags, FNORDAHEAD); if (flags) { DEBUG("unexpected open mode %x", flags); return ENOTSUP; } np = NTO9P(ap->a_vp); nlock_9p(np, NODE_LCK_EXCLUSIVE); op = ofidget(np, ap->a_mode); if (op->fid == NOFID) { if ((e=walk_9p(np->nmp, np->fid, NULL, 0, &fid, &qid))) goto error; if ((e=open_9p(np->nmp, fid, mode, &qid, &iounit))) goto error; np->iounit = iounit; op->fid = fid; } /* no cache for dirs, .u or synthetic files */ if (!vnode_isreg(np->vp) || np->dir.qid.vers==0) { vnode_setnocache(np->vp); vnode_setnoreadahead(np->vp); } OSIncrementAtomic(&op->ref); nunlock_9p(np); return 0; error: clunk_9p(np->nmp, fid); nunlock_9p(np); return e; }
/*
 * Set the termios flags in the global 'tmode' for getty phase 'n'
 * (0 = initial, 1 = read/login-name, other = final).  If the gettytab
 * entry supplies a complete explicit flag set (C/I/L/O #0/#1/#2), that
 * is used verbatim; otherwise the flags are derived from the saved
 * original modes ('omode') and the boolean capabilities (NP, EP, OP,
 * AP, UC, HC, MB, NL, HT, RW, CE, CK, PE, EC, XC, DX).
 */
void
setflags(int n)
{
	tcflag_t iflag, oflag, cflag, lflag;

	/* Explicit per-phase flag sets win if fully specified. */
	switch (n) {
	case 0:
		if (C0set && I0set && L0set && O0set) {
			tmode.c_cflag = C0;
			tmode.c_iflag = I0;
			tmode.c_lflag = L0;
			tmode.c_oflag = O0;
			return;
		}
		break;
	case 1:
		if (C1set && I1set && L1set && O1set) {
			tmode.c_cflag = C1;
			tmode.c_iflag = I1;
			tmode.c_lflag = L1;
			tmode.c_oflag = O1;
			return;
		}
		break;
	default:
		if (C2set && I2set && L2set && O2set) {
			tmode.c_cflag = C2;
			tmode.c_iflag = I2;
			tmode.c_lflag = L2;
			tmode.c_oflag = O2;
			return;
		}
		break;
	}

	/* Start from the modes the terminal had when getty began. */
	iflag = omode.c_iflag;
	oflag = omode.c_oflag;
	cflag = omode.c_cflag;
	lflag = omode.c_lflag;

	/* Parity handling: none (NP), any (AP), even (EP), odd (OP). */
	if (NP) {
		CLR(cflag, CSIZE|PARENB);
		SET(cflag, CS8);
		CLR(iflag, ISTRIP|INPCK|IGNPAR);
	} else if (AP || EP || OP) {
		CLR(cflag, CSIZE);
		SET(cflag, CS7|PARENB);
		SET(iflag, ISTRIP);
		if (OP && !EP) {
			SET(iflag, INPCK|IGNPAR);
			SET(cflag, PARODD);
			if (AP)
				CLR(iflag, INPCK);
		} else if (EP && !OP) {
			SET(iflag, INPCK|IGNPAR);
			CLR(cflag, PARODD);
			if (AP)
				CLR(iflag, INPCK);
		} else if (AP || (EP && OP)) {
			CLR(iflag, INPCK|IGNPAR);
			CLR(cflag, PARODD);
		}
	} /* else, leave as is */

	/* Upper-case-only terminal. */
	if (UC) {
		SET(iflag, IUCLC);
		SET(oflag, OLCUC);
		SET(lflag, XCASE);
	}

	if (HC)
		SET(cflag, HUPCL);
	else
		CLR(cflag, HUPCL);

	if (MB)
		SET(cflag, MDMBUF);
	else
		CLR(cflag, MDMBUF);

	/* CR/NL mapping. */
	if (NL) {
		SET(iflag, ICRNL);
		SET(oflag, ONLCR|OPOST);
	} else {
		CLR(iflag, ICRNL);
		CLR(oflag, ONLCR);
	}

	/* No hardware tabs: expand them on output. */
	if (!HT)
		SET(oflag, OXTABS|OPOST);
	else
		CLR(oflag, OXTABS);

#ifdef XXX_DELAY
	/* NOTE(review): 'f' is not declared here; this block looks like
	 * dead legacy code kept behind an undefined macro. */
	SET(f, delaybits());
#endif

	if (n == 1) {		/* read mode flags */
		if (RW) {
			/* Raw mode for reading the login name. */
			iflag = 0;
			CLR(oflag, OPOST);
			CLR(cflag, CSIZE|PARENB);
			SET(cflag, CS8);
			lflag = 0;
		} else {
			CLR(lflag, ICANON);
		}
		goto out;
	}

	if (n == 0)
		goto out;

#if 0
	if (CB)
		SET(f, CRTBS);
#endif

	/* Echo/editing behavior for the final (login) modes. */
	if (CE)
		SET(lflag, ECHOE);
	else
		CLR(lflag, ECHOE);

	if (CK)
		SET(lflag, ECHOKE);
	else
		CLR(lflag, ECHOKE);

	if (PE)
		SET(lflag, ECHOPRT);
	else
		CLR(lflag, ECHOPRT);

	if (EC)
		SET(lflag, ECHO);
	else
		CLR(lflag, ECHO);

	if (XC)
		SET(lflag, ECHOCTL);
	else
		CLR(lflag, ECHOCTL);

	if (DX)
		SET(lflag, IXANY);
	else
		CLR(lflag, IXANY);

out:
	tmode.c_iflag = iflag;
	tmode.c_oflag = oflag;
	tmode.c_cflag = cflag;
	tmode.c_lflag = lflag;
}
/*
 * Set the termios parameters on a ucom USB serial port.  Validates the
 * request, copies the new modes into the tty, forwards them to the
 * hardware-specific ucom_param method (if any), and refreshes the tty
 * layer's carrier state.  Returns 0 or an errno.
 */
static int
ucomparam(struct tty *tp, struct termios *t)
{
	struct ucom_softc *sc =
	    device_lookup_private(&ucom_cd, UCOMUNIT(tp->t_dev));
	int error;

	if (sc == NULL || sc->sc_dying)
		return (EIO);

	/* Check requested parameters: split speeds are not supported. */
	if (t->c_ispeed && t->c_ispeed != t->c_ospeed)
		return (EINVAL);

	/*
	 * When the softcar flag is set, always force CLOCAL and !HUPCL,
	 * so that the port is always active.
	 */
	if (ISSET(sc->sc_swflags, TIOCFLAG_SOFTCAR)) {
		SET(t->c_cflag, CLOCAL);
		CLR(t->c_cflag, HUPCL);
	}

	/*
	 * If there were no changes, don't do anything.  This avoids dropping
	 * input and improves performance when all we did was frob things like
	 * VMIN and VTIME.
	 */
	if (tp->t_ospeed == t->c_ospeed &&
	    tp->t_cflag == t->c_cflag)
		return (0);

	/* XXX lcr = ISSET(sc->sc_lcr, LCR_SBREAK) | cflag2lcr(t->c_cflag); */

	/* And copy to tty. */
	tp->t_ispeed = 0;
	tp->t_ospeed = t->c_ospeed;
	tp->t_cflag = t->c_cflag;

	/* Let the per-device driver program the hardware. */
	if (sc->sc_methods->ucom_param != NULL) {
		error = sc->sc_methods->ucom_param(sc->sc_parent,
		    sc->sc_portno, t);
		if (error)
			return (error);
	}

	/* XXX worry about CHWFLOW */

	/*
	 * Update the tty layer's idea of the carrier bit, in case we changed
	 * CLOCAL or MDMBUF.  We don't hang up here; we only do that by
	 * explicit request.
	 */
	DPRINTF(("ucomparam: l_modem\n"));
	(void) (*tp->t_linesw->l_modem)(tp, ISSET(sc->sc_msr, UMSR_DCD));

#if 0
XXX what if the hardware is not open
	if (!ISSET(t->c_cflag, CHWFLOW)) {
		if (sc->sc_tx_stopped) {
			sc->sc_tx_stopped = 0;
			ucomstart(tp);
		}
	}
#endif

	return (0);
}
/*
 * Release a buffer on to the free lists.
 * Described in Bach (p. 46).
 *
 * Chooses the free queue (EMPTY, CLEAN, DIRTY or LOCKED) based on the
 * buffer's flags, maintains the page-accounting counters, clears the
 * transient flags, and wakes any waiters.
 */
void
brelse(struct buf *bp)
{
	struct bqueues *bufq;
	int s;

	/* Block disk interrupts. */
	s = splbio();

	/*
	 * Determine which queue the buffer should be on, then put it there.
	 */

	/* If it's locked, don't report an error; try again later. */
	if (ISSET(bp->b_flags, (B_LOCKED|B_ERROR)) == (B_LOCKED|B_ERROR))
		CLR(bp->b_flags, B_ERROR);

	/* If it's not cacheable, or an error, mark it invalid. */
	if (ISSET(bp->b_flags, (B_NOCACHE|B_ERROR)))
		SET(bp->b_flags, B_INVAL);

	if ((bp->b_bufsize <= 0) || ISSET(bp->b_flags, B_INVAL)) {
		/*
		 * If it's invalid or empty, dissociate it from its vnode
		 * and put on the head of the appropriate queue.
		 */
		if (LIST_FIRST(&bp->b_dep) != NULL)
			buf_deallocate(bp);

		if (ISSET(bp->b_flags, B_DELWRI)) {
			CLR(bp->b_flags, B_DELWRI);
		}

		if (bp->b_vp) {
			reassignbuf(bp);
			brelvp(bp);
		}
		if (bp->b_bufsize <= 0) {
			/* no data */
			bufq = &bufqueues[BQ_EMPTY];
			numemptybufs++;
		} else {
			/* invalid data */
			bufq = &bufqueues[BQ_CLEAN];
			numfreepages += btoc(bp->b_bufsize);
			numcleanpages += btoc(bp->b_bufsize);
		}
		binsheadfree(bp, bufq);
	} else {
		/*
		 * It has valid data.  Put it on the end of the appropriate
		 * queue, so that it'll stick around for as long as possible.
		 */
		if (ISSET(bp->b_flags, B_LOCKED))
			/* locked in core */
			bufq = &bufqueues[BQ_LOCKED];
		else {
			numfreepages += btoc(bp->b_bufsize);
			if (!ISSET(bp->b_flags, B_DELWRI)) {
				numcleanpages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_CLEAN];
			} else {
				numdirtypages += btoc(bp->b_bufsize);
				bufq = &bufqueues[BQ_DIRTY];
			}
		}
		/* B_AGE buffers are reused first: insert at the head. */
		if (ISSET(bp->b_flags, B_AGE))
			binsheadfree(bp, bufq);
		else
			binstailfree(bp, bufq);
	}

	/* Unlock the buffer. */
	CLR(bp->b_flags, (B_AGE | B_ASYNC | B_BUSY | B_NOCACHE | B_DEFERRED));

	/* Wake up syncer and cleaner processes waiting for buffers */
	if (nobuffers) {
		wakeup(&nobuffers);
		nobuffers = 0;
	}

	/* Wake up any processes waiting for any buffer to become free. */
	if (needbuffer && (numcleanpages > locleanpages)) {
		needbuffer--;
		wakeup_one(&needbuffer);
	}

	splx(s);

	/* Wake up any processes waiting for _this_ buffer to become free. */
	if (ISSET(bp->b_flags, B_WANTED)) {
		CLR(bp->b_flags, B_WANTED);
		wakeup(bp);
	}
}
/*
 * Begin an SPI transaction: lazily initialise the SPI bus on first use,
 * then assert (drive low) the active-low slave-select line nSS.
 */
void
spi_begin(void)
{
	if (spi_initialized == 0)
		spi_init();

	CLR(nSS);
}
/* from f to t */
/*
 * Fire a bolt of type 'dtype' from (fx,fy) toward (tx,ty), animating it
 * with do_los().  Whatever square the bolt stops on is resolved: damage
 * to the player (unless DEFLECTION), an attack roll against a monster,
 * or terrain effects on hedges and water.  'hit' is the to-hit value,
 * 'dmg' the damage dice range.
 */
void
bolt(int fx, int fy, int tx, int ty, int hit, int dmg, int dtype)
{
	int xx, yy;
	struct monster *target;
	Symbol boltchar = '?';

	xx = fx;
	yy = fy;

	/* Pick the animation glyph/colour for this damage type. */
	switch (dtype) {
	case FLAME:
		boltchar = ('*' | CLR(LIGHT_RED));
		break;
	case ELECTRICITY:
		boltchar = ('^' | CLR(LIGHT_BLUE));
		break;
	case NORMAL_DAMAGE:
		boltchar = ('!' | CLR(BROWN));
		break;
	case COLD:
		boltchar = ('o' | CLR(WHITE));
		break;
	default:
		assert(FALSE); /* this should never happen, right? WDT */
	}

	clearmsg();

	/* Trace the bolt; xx/yy end up at the square where it stopped. */
	do_los(boltchar, &xx, &yy, tx, ty);
	if ((xx == Player.x) && (yy == Player.y)) {
		if (Player.status[DEFLECTION] > 0)
			mprint("The bolt just missed you!");
		else {
			switch (dtype) {
			case FLAME:
				mprint("You were blasted by a firebolt!");
				p_damage(random_range(dmg), dtype,
				    "a firebolt");
				break;
			case ELECTRICITY:
				mprint("You were zapped by lightning!");
				p_damage(random_range(dmg), dtype,
				    "a bolt of lightning");
				break;
			case NORMAL_DAMAGE:
				mprint("You were hit by a missile!");
				p_damage(random_range(dmg), dtype,
				    "a missile");
				break;
			case COLD:
				mprint("You were hit by an icicle!");
				p_damage(random_range(dmg), dtype,
				    "an icicle");
				break;
			}
		}
	} else if (NULL != (target = Level->site[xx][yy].creature)) {
		if (hitp(hit, target->ac)) {
			/* Build "The <monster>" (uniques omit "The "). */
			if (target->uniqueness == COMMON) {
				strcpy(Str1, "The ");
				strcat(Str1, target->monstring);
			} else
				strcpy(Str1, target->monstring);
			switch (dtype) {
			/* WDT: these sentances really ought to be livened
			 * up.  Especially in full verbose mode. */
			case FLAME:
				strcat(Str1, " was blasted by a firebolt!");
				break;
			case ELECTRICITY:
				strcat(Str1, " was zapped by lightning!");
				break;
			case NORMAL_DAMAGE:
				strcat(Str1, " was hit by a missile!");
				break;
			case COLD:
				strcat(Str1, " was hit by an icicle!");
				break;
			}
			mprint(Str1);
			m_status_set(target, HOSTILE);
			m_damage(target, random_range(dmg), dtype);
		} else {
			if (target->uniqueness == COMMON) {
				strcpy(Str1, "The ");
				strcat(Str1, target->monstring);
			} else
				strcpy(Str1, target->monstring);
			switch (dtype) {
			case FLAME:
				strcat(Str1, " was missed by a firebolt!");
				break;
			case ELECTRICITY:
				strcat(Str1, " was missed by lightning!");
				break;
			case NORMAL_DAMAGE:
				strcat(Str1, " was missed by a missile!");
				break;
			case COLD:
				strcat(Str1,
				    " was missed by a flying icicle!");
				break;
			}
			mprint(Str1);
		}
	} else if (Level->site[xx][yy].locchar == HEDGE)
		/* Fire or lightning burns through hedges (not trifids). */
		if (Level->site[xx][yy].p_locf != L_TRIFID) {
			if ((dtype == FLAME) || (dtype == ELECTRICITY)) {
				mprint("The hedge is blasted away!");
				Level->site[xx][yy].p_locf = L_NO_OP;
				Level->site[xx][yy].locchar = FLOOR;
				plotspot(xx, yy, TRUE);
				lset(xx, yy, CHANGED);
			} else
				mprint("The hedge is unaffected.");
		} else
			mprint("The trifid absorbs the energy and laughs!");
	else if (Level->site[xx][yy].locchar == WATER)
		if (dtype == FLAME) {
			mprint("The water is vaporised!");
			Level->site[xx][yy].p_locf = L_NO_OP;
			Level->site[xx][yy].locchar = FLOOR;
			lset(xx, yy, CHANGED);
		}
}
/*
 * sdstart looks to see if there is a buf waiting for the device
 * and that the device is not already busy.  If both are true,
 * It dequeues the buf and creates a scsi command to perform the
 * transfer in the buf.  The transfer request will call scsi_done
 * on completion, which will in turn call this routine again
 * so that the next queued transfer is performed.
 * The bufs are queued by the strategy routine (sdstrategy)
 *
 * This routine is also called after other non-queued requests
 * have been made of the scsi driver, to ensure that the queue
 * continues to be drained.
 */
void
sdstart(struct scsi_xfer *xs)
{
	struct scsi_link *link = xs->sc_link;
	struct sd_softc *sc = link->device_softc;
	struct buf *bp;
	u_int64_t secno;
	int nsecs;
	int read;
	struct partition *p;

	/* Device going away: return the xfer and bail. */
	if (sc->flags & SDF_DYING) {
		scsi_xs_put(xs);
		return;
	}
	/* No media: fail everything that is queued. */
	if ((link->flags & SDEV_MEDIA_LOADED) == 0) {
		bufq_drain(&sc->sc_bufq);
		scsi_xs_put(xs);
		return;
	}

	bp = bufq_dequeue(&sc->sc_bufq);
	if (bp == NULL) {
		scsi_xs_put(xs);
		return;
	}

	/* Translate the block address to an absolute sector number. */
	secno = DL_BLKTOSEC(sc->sc_dk.dk_label, bp->b_blkno);
	p = &sc->sc_dk.dk_label->d_partitions[DISKPART(bp->b_dev)];
	secno += DL_GETPOFFSET(p);
	nsecs = howmany(bp->b_bcount, sc->sc_dk.dk_label->d_secsize);
	read = bp->b_flags & B_READ;

	/*
	 * Fill out the scsi command.  If the transfer will
	 * fit in a "small" cdb, use it.
	 */
	if (!(link->flags & SDEV_ATAPI) &&
	    !(link->quirks & SDEV_ONLYBIG) &&
	    ((secno & 0x1fffff) == secno) &&
	    ((nsecs & 0xff) == nsecs))
		sd_cmd_rw6(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffff) == nsecs))
		sd_cmd_rw10(xs, read, secno, nsecs);
	else if (((secno & 0xffffffff) == secno) &&
	    ((nsecs & 0xffffffff) == nsecs))
		sd_cmd_rw12(xs, read, secno, nsecs);
	else
		sd_cmd_rw16(xs, read, secno, nsecs);

	xs->flags |= (read ? SCSI_DATA_IN : SCSI_DATA_OUT);
	xs->timeout = 60000;
	xs->data = bp->b_data;
	xs->datalen = bp->b_bcount;
	xs->done = sd_buf_done;
	xs->cookie = bp;
	xs->bp = bp;

	/* Instrumentation. */
	disk_busy(&sc->sc_dk);

	/* Mark disk as dirty. */
	if (!read)
		sc->flags |= SDF_DIRTY;

	scsi_xs_exec(xs);

	/* move onto the next io */
	if (ISSET(sc->flags, SDF_WAITING))
		CLR(sc->flags, SDF_WAITING);
	else if (bufq_peek(&sc->sc_bufq))
		scsi_xsh_add(&sc->sc_xsh);
}
/*
 * Handle DIOCGCACHE/DIOCSCACHE for a SCSI disk: read the caching mode
 * page, report the current write/read cache enables, and for DIOCSCACHE
 * write the page back with the requested settings via MODE SELECT.
 * Returns 0 or an errno.
 */
int
sd_ioctl_cache(struct sd_softc *sc, long cmd, struct dk_cache *dkc)
{
	union scsi_mode_sense_buf *buf;
	struct page_caching_mode *mode = NULL;
	u_int wrcache, rdcache;
	int big;
	int rv;

	/* USB mass storage devices do not support this reliably. */
	if (ISSET(sc->sc_link->flags, SDEV_UMASS))
		return (EOPNOTSUPP);

	/* see if the adapter has special handling */
	rv = scsi_do_ioctl(sc->sc_link, cmd, (caddr_t)dkc, 0);
	if (rv != ENOTTY)
		return (rv);

	buf = dma_alloc(sizeof(*buf), PR_WAITOK);
	if (buf == NULL)
		return (ENOMEM);

	/* Fetch the caching mode page (without the RPM trailer). */
	rv = scsi_do_mode_sense(sc->sc_link, PAGE_CACHING_MODE,
	    buf, (void **)&mode, NULL, NULL, NULL,
	    sizeof(*mode) - 4, scsi_autoconf | SCSI_SILENT, &big);
	if (rv != 0)
		goto done;

	if ((mode == NULL) || (!DISK_PGCODE(mode, PAGE_CACHING_MODE))) {
		rv = EIO;
		goto done;
	}

	/* WCE set = write cache on; RCD set = read cache DISABLED. */
	wrcache = (ISSET(mode->flags, PG_CACHE_FL_WCE) ? 1 : 0);
	rdcache = (ISSET(mode->flags, PG_CACHE_FL_RCD) ? 0 : 1);

	switch (cmd) {
	case DIOCGCACHE:
		dkc->wrcache = wrcache;
		dkc->rdcache = rdcache;
		break;

	case DIOCSCACHE:
		/* Skip the MODE SELECT if nothing would change. */
		if (dkc->wrcache == wrcache && dkc->rdcache == rdcache)
			break;

		if (dkc->wrcache)
			SET(mode->flags, PG_CACHE_FL_WCE);
		else
			CLR(mode->flags, PG_CACHE_FL_WCE);

		if (dkc->rdcache)
			CLR(mode->flags, PG_CACHE_FL_RCD);
		else
			SET(mode->flags, PG_CACHE_FL_RCD);

		if (big) {
			rv = scsi_mode_select_big(sc->sc_link, SMS_PF,
			    &buf->hdr_big, scsi_autoconf | SCSI_SILENT,
			    20000);
		} else {
			rv = scsi_mode_select(sc->sc_link, SMS_PF,
			    &buf->hdr, scsi_autoconf | SCSI_SILENT, 20000);
		}
		break;
	}

done:
	dma_free(buf, sizeof(*buf));
	return (rv);
}
/*
 * Fill out the disk parameter structure.  Return SDGP_RESULT_OK if the
 * structure is correctly filled in, SDGP_RESULT_OFFLINE otherwise.  The caller
 * is responsible for clearing the SDEV_MEDIA_LOADED flag if the structure
 * cannot be completed.
 */
int
sd_get_parms(struct sd_softc *sc, struct disk_parms *dp, int flags)
{
	union scsi_mode_sense_buf *buf = NULL;
	struct page_rigid_geometry *rigid = NULL;
	struct page_flex_geometry *flex = NULL;
	struct page_reduced_geometry *reduced = NULL;
	u_char *page0 = NULL;
	u_int32_t heads = 0, sectors = 0, cyls = 0, secsize = 0;
	int err = 0, big;

	if (sd_size(sc, flags) != 0)
		return (SDGP_RESULT_OFFLINE);

	if (ISSET(sc->flags, SDF_THIN) && sd_thin_params(sc, flags) != 0) {
		/* we dont know the unmap limits, so we cant use thin shizz */
		CLR(sc->flags, SDF_THIN);
	}

	/* No DMA buffer means: skip mode sense, use fallback geometry. */
	buf = dma_alloc(sizeof(*buf), PR_NOWAIT);
	if (buf == NULL)
		goto validate;

	/*
	 * Ask for page 0 (vendor specific) mode sense data to find
	 * READONLY info.  The only thing USB devices will ask for.
	 */
	err = scsi_do_mode_sense(sc->sc_link, 0, buf, (void **)&page0,
	    NULL, NULL, NULL, 1, flags | SCSI_SILENT, &big);
	if (err == 0) {
		if (big && buf->hdr_big.dev_spec & SMH_DSP_WRITE_PROT)
			SET(sc->sc_link->flags, SDEV_READONLY);
		else if (!big && buf->hdr.dev_spec & SMH_DSP_WRITE_PROT)
			SET(sc->sc_link->flags, SDEV_READONLY);
		else
			CLR(sc->sc_link->flags, SDEV_READONLY);
	}

	/*
	 * Many UMASS devices choke when asked about their geometry.  Most
	 * don't have a meaningful geometry anyway, so just fake it if
	 * scsi_size() worked.
	 */
	if ((sc->sc_link->flags & SDEV_UMASS) && (dp->disksize > 0))
		goto validate;

	switch (sc->sc_link->inqdata.device & SID_TYPE) {
	case T_OPTICAL:
		/* No more information needed or available. */
		break;

	case T_RDIRECT:
		/* T_RDIRECT supports only PAGE_REDUCED_GEOMETRY (6). */
		err = scsi_do_mode_sense(sc->sc_link,
		    PAGE_REDUCED_GEOMETRY, buf, (void **)&reduced,
		    NULL, NULL, &secsize, sizeof(*reduced),
		    flags | SCSI_SILENT, NULL);
		if (!err && reduced &&
		    DISK_PGCODE(reduced, PAGE_REDUCED_GEOMETRY)) {
			if (dp->disksize == 0)
				dp->disksize = _5btol(reduced->sectors);
			if (secsize == 0)
				secsize = _2btol(reduced->bytes_s);
		}
		break;

	default:
		/*
		 * NOTE: Some devices leave off the last four bytes of
		 * PAGE_RIGID_GEOMETRY and PAGE_FLEX_GEOMETRY mode sense pages.
		 * The only information in those four bytes is RPM information
		 * so accept the page.  The extra bytes will be zero and RPM will
		 * end up with the default value of 3600.
		 */
		if (((sc->sc_link->flags & SDEV_ATAPI) == 0) ||
		    ((sc->sc_link->flags & SDEV_REMOVABLE) == 0))
			err = scsi_do_mode_sense(sc->sc_link,
			    PAGE_RIGID_GEOMETRY, buf, (void **)&rigid, NULL,
			    NULL, &secsize, sizeof(*rigid) - 4,
			    flags | SCSI_SILENT, NULL);
		if (!err && rigid && DISK_PGCODE(rigid, PAGE_RIGID_GEOMETRY)) {
			heads = rigid->nheads;
			cyls = _3btol(rigid->ncyl);
			if (heads * cyls > 0)
				sectors = dp->disksize / (heads * cyls);
		} else {
			/* Fall back to the flexible-disk geometry page. */
			err = scsi_do_mode_sense(sc->sc_link,
			    PAGE_FLEX_GEOMETRY, buf, (void **)&flex, NULL,
			    NULL, &secsize, sizeof(*flex) - 4,
			    flags | SCSI_SILENT, NULL);
			if (!err && flex &&
			    DISK_PGCODE(flex, PAGE_FLEX_GEOMETRY)) {
				sectors = flex->ph_sec_tr;
				heads = flex->nheads;
				cyls = _2btol(flex->ncyl);
				if (secsize == 0)
					secsize = _2btol(flex->bytes_s);
				if (dp->disksize == 0)
					dp->disksize = heads * cyls * sectors;
			}
		}
		break;
	}

validate:
	if (buf)
		dma_free(buf, sizeof(*buf));

	if (dp->disksize == 0)
		return (SDGP_RESULT_OFFLINE);

	if (dp->secsize == 0)
		dp->secsize = (secsize == 0) ? 512 : secsize;

	/*
	 * Restrict secsize values to powers of two between 512 and 64k.
	 */
	switch (dp->secsize) {
	case 0x200:	/* == 512, == DEV_BSIZE on all architectures. */
	case 0x400:
	case 0x800:
	case 0x1000:
	case 0x2000:
	case 0x4000:
	case 0x8000:
	case 0x10000:
		break;
	default:
		SC_DEBUG(sc->sc_link, SDEV_DB1,
		    ("sd_get_parms: bad secsize: %#x\n", dp->secsize));
		return (SDGP_RESULT_OFFLINE);
	}

	/*
	 * XXX THINK ABOUT THIS!!  Using values such that sectors * heads *
	 * cyls is <= disk_size can lead to wasted space.  We need a more
	 * careful calculation/validation to make everything work out
	 * optimally.
	 */
	if (dp->disksize > 0xffffffff && (dp->heads * dp->sectors) < 0xffff) {
		dp->heads = 511;
		dp->sectors = 255;
		cyls = 0;
	} else {
		/*
		 * Use standard geometry values for anything we still don't
		 * know.
		 */
		dp->heads = (heads == 0) ? 255 : heads;
		dp->sectors = (sectors == 0) ? 63 : sectors;
	}

	dp->cyls = (cyls == 0) ? dp->disksize / (dp->heads * dp->sectors) :
	    cyls;

	if (dp->cyls == 0) {
		dp->heads = dp->cyls = 1;
		dp->sectors = dp->disksize;
	}

	return (SDGP_RESULT_OK);
}
/*
 * Old TTY => termios, snatched from <sys/kern/tty_compat.c>
 *
 * Translate the old sgtty-style flag word into the four termios flag
 * words and store them in the global 'tmode'.
 */
void
compatflags(long flags)
{
	tcflag_t iflag, oflag, cflag, lflag;

	/* Baseline cooked-mode defaults before applying the old flags. */
	iflag = BRKINT|ICRNL|IMAXBEL|IXON|IXANY;
	oflag = OPOST|ONLCR|OXTABS;
	cflag = CREAD;
	lflag = ICANON|ISIG|IEXTEN;

	if (ISSET(flags, TANDEM))
		SET(iflag, IXOFF);
	else
		CLR(iflag, IXOFF);
	if (ISSET(flags, ECHO))
		SET(lflag, ECHO);
	else
		CLR(lflag, ECHO);
	if (ISSET(flags, CRMOD)) {
		SET(iflag, ICRNL);
		SET(oflag, ONLCR);
	} else {
		CLR(iflag, ICRNL);
		CLR(oflag, ONLCR);
	}
	if (ISSET(flags, XTABS))
		SET(oflag, OXTABS);
	else
		CLR(oflag, OXTABS);

	if (ISSET(flags, RAW)) {
		/* RAW: keep only IXOFF of the input flags, no signals,
		 * no canonical processing, no parity. */
		iflag &= IXOFF;
		CLR(lflag, ISIG|ICANON|IEXTEN);
		CLR(cflag, PARENB);
	} else {
		SET(iflag, BRKINT|IXON|IMAXBEL);
		SET(lflag, ISIG|IEXTEN);
		if (ISSET(flags, CBREAK))
			CLR(lflag, ICANON);
		else
			SET(lflag, ICANON);

		/* Map the old two-bit parity field onto termios. */
		switch (ISSET(flags, ANYP)) {
		case 0:
			CLR(cflag, PARENB);
			break;
		case ANYP:
			SET(cflag, PARENB);
			CLR(iflag, INPCK);
			break;
		case EVENP:
			SET(cflag, PARENB);
			SET(iflag, INPCK);
			CLR(cflag, PARODD);
			break;
		case ODDP:
			SET(cflag, PARENB);
			SET(iflag, INPCK);
			SET(cflag, PARODD);
			break;
		}
	}

	/* Nothing we can do with CRTBS. */
	if (ISSET(flags, PRTERA))
		SET(lflag, ECHOPRT);
	else
		CLR(lflag, ECHOPRT);
	if (ISSET(flags, CRTERA))
		SET(lflag, ECHOE);
	else
		CLR(lflag, ECHOE);
	/* Nothing we can do with TILDE. */
	if (ISSET(flags, MDMBUF))
		SET(cflag, MDMBUF);
	else
		CLR(cflag, MDMBUF);
	if (ISSET(flags, NOHANG))
		CLR(cflag, HUPCL);
	else
		SET(cflag, HUPCL);
	if (ISSET(flags, CRTKIL))
		SET(lflag, ECHOKE);
	else
		CLR(lflag, ECHOKE);
	if (ISSET(flags, CTLECH))
		SET(lflag, ECHOCTL);
	else
		CLR(lflag, ECHOCTL);
	if (!ISSET(flags, DECCTQ))
		SET(iflag, IXANY);
	else
		CLR(iflag, IXANY);

	/* These four old bits share values with their termios lflags. */
	CLR(lflag, TOSTOP|FLUSHO|PENDIN|NOFLSH);
	SET(lflag, ISSET(flags, TOSTOP|FLUSHO|PENDIN|NOFLSH));

	/* 8-bit paths (RAW, LITOUT, PASS8) versus 7-bit default. */
	if (ISSET(flags, RAW|LITOUT|PASS8)) {
		CLR(cflag, CSIZE);
		SET(cflag, CS8);
		if (!ISSET(flags, RAW|PASS8))
			SET(iflag, ISTRIP);
		else
			CLR(iflag, ISTRIP);
		if (!ISSET(flags, RAW|LITOUT))
			SET(oflag, OPOST);
		else
			CLR(oflag, OPOST);
	} else {
		CLR(cflag, CSIZE);
		SET(cflag, CS7);
		SET(iflag, ISTRIP);
		SET(oflag, OPOST);
	}

	tmode.c_iflag = iflag;
	tmode.c_oflag = oflag;
	tmode.c_cflag = cflag;
	tmode.c_lflag = lflag;
}
/*
 * Soft interrupt handler for the spif card: for every open port, drain
 * the (status,data) pairs queued by the hard interrupt handler into the
 * line discipline, then act on the event flags (carrier change, receive
 * ring overflow, transmit completion).
 */
void
spif_softintr(void *vsc)
{
	struct spif_softc *sc = (struct spif_softc *)vsc;
	struct stty_softc *stc = sc->sc_ttys;
	int i, data, s, flags;
	uint8_t stat, msvr;
	struct stty_port *sp;
	struct tty *tp;

	if (stc != NULL) {
		for (i = 0; i < stc->sc_nports; i++) {
			sp = &stc->sc_port[i];
			tp = sp->sp_tty;

			if (!ISSET(tp->t_state, TS_ISOPEN))
				continue;

			/*
			 * The receive ring holds interleaved status/data
			 * byte pairs deposited at interrupt time.
			 */
			while (sp->sp_rget != sp->sp_rput) {
				stat = sp->sp_rget[0];
				data = sp->sp_rget[1];
				sp->sp_rget += 2;
				if (sp->sp_rget == sp->sp_rend)
					sp->sp_rget = sp->sp_rbuf;

				/* Translate CD180 status to tty errors. */
				if (stat & (CD180_RCSR_BE | CD180_RCSR_FE))
					data |= TTY_FE;
				if (stat & CD180_RCSR_PE)
					data |= TTY_PE;

				(*tp->t_linesw->l_rint)(data, tp);
			}

			/* Atomically snapshot and clear the event flags. */
			s = splhigh();
			flags = sp->sp_flags;
			CLR(sp->sp_flags, STTYF_DONE | STTYF_CDCHG |
			    STTYF_RING_OVERFLOW);
			splx(s);

			if (ISSET(flags, STTYF_CDCHG)) {
				/* Re-read carrier state from the chip. */
				s = spltty();
				STC_WRITE(sc, STC_CAR, i);
				msvr = STC_READ(sc, STC_MSVR);
				splx(s);

				sp->sp_carrier = msvr & CD180_MSVR_CD;
				(*tp->t_linesw->l_modem)(tp,
				    sp->sp_carrier);
			}

			if (ISSET(flags, STTYF_RING_OVERFLOW)) {
				log(LOG_WARNING, "%s-%x: ring overflow\n",
				    device_xname(stc->sc_dev), i);
			}

			if (ISSET(flags, STTYF_DONE)) {
				/* Transmit finished: retire the output
				 * bytes and restart the line discipline. */
				ndflush(&tp->t_outq,
				    sp->sp_txp - tp->t_outq.c_cf);
				CLR(tp->t_state, TS_BUSY);
				(*tp->t_linesw->l_start)(tp);
			}
		}
	}
}
/*
 * Detach a ucom USB serial device: abort in-flight transfers, wake and
 * wait out any users, revoke the device nodes, and free the tty, the
 * transfer buffers and the random source.  Always returns 0.
 */
int
ucom_detach(device_t self, int flags)
{
	struct ucom_softc *sc = device_private(self);
	struct tty *tp = sc->sc_tty;
	int maj, mn;
	int s, i;

	DPRINTF(("ucom_detach: sc=%p flags=%d tp=%p, pipe=%d,%d\n",
	    sc, flags, tp, sc->sc_bulkin_no, sc->sc_bulkout_no));

	sc->sc_dying = 1;
	pmf_device_deregister(self);

	/* Abort any pending bulk I/O so sleepers wake with errors. */
	if (sc->sc_bulkin_pipe != NULL)
		usbd_abort_pipe(sc->sc_bulkin_pipe);
	if (sc->sc_bulkout_pipe != NULL)
		usbd_abort_pipe(sc->sc_bulkout_pipe);

	s = splusb();
	if (--sc->sc_refcnt >= 0) {
		/* Wake up anyone waiting */
		if (tp != NULL) {
			mutex_spin_enter(&tty_lock);
			CLR(tp->t_state, TS_CARR_ON);
			CLR(tp->t_cflag, CLOCAL | MDMBUF);
			ttyflush(tp, FREAD|FWRITE);
			mutex_spin_exit(&tty_lock);
		}
		/* Wait for processes to go away. */
		usb_detach_waitold(sc->sc_dev);
	}

	softint_disestablish(sc->sc_si);
	splx(s);

	/* locate the major number */
	maj = cdevsw_lookup_major(&ucom_cdevsw);

	/* Nuke the vnodes for any open instances. */
	mn = device_unit(self);
	DPRINTF(("ucom_detach: maj=%d mn=%d\n", maj, mn));
	vdevgone(maj, mn, mn, VCHR);
	vdevgone(maj, mn | UCOMDIALOUT_MASK, mn | UCOMDIALOUT_MASK, VCHR);
	vdevgone(maj, mn | UCOMCALLUNIT_MASK, mn | UCOMCALLUNIT_MASK, VCHR);

	/* Detach and free the tty. */
	if (tp != NULL) {
		tty_detach(tp);
		tty_free(tp);
		sc->sc_tty = NULL;
	}

	/* Free the per-direction transfer buffers. */
	for (i = 0; i < UCOM_IN_BUFFS; i++) {
		if (sc->sc_ibuff[i].ub_xfer != NULL)
			usbd_free_xfer(sc->sc_ibuff[i].ub_xfer);
	}
	for (i = 0; i < UCOM_OUT_BUFFS; i++) {
		if (sc->sc_obuff[i].ub_xfer != NULL)
			usbd_free_xfer(sc->sc_obuff[i].ub_xfer);
	}

	/* Detach the random source */
	rnd_detach_source(&sc->sc_rndsource);

	return (0);
}
/* from f to t */
/*
 * Fire a ball spell of type 'dtype' from (fx,fy) toward (tx,ty).  The
 * projectile travels via do_los(); where it stops it explodes, affecting
 * the 3x3 area around the impact square: the player, any monsters (which
 * always take damage, messages only when in line of sight), and hedge or
 * water terrain.  'dmg' is the damage dice range.
 */
void
ball(int fx, int fy, int tx, int ty, int dmg, int dtype)
{
	int xx, yy, ex, ey, i;
	struct monster *target;
	Symbol expchar = ('@' | CLR(LIGHT_PURPLE));

	xx = fx;
	yy = fy;

	/* Pick the explosion glyph/colour; '@' is the fallback. */
	switch (dtype) {
	case FLAME:
		expchar = ('*' | CLR(LIGHT_RED));
		break;
	case COLD:
		expchar = ('o' | CLR(WHITE));
		break;
	case ELECTRICITY:
		expchar = ('^' | CLR(LIGHT_BLUE));
		break;
	}

	do_los(expchar, &xx, &yy, tx, ty);
	draw_explosion(expchar, xx, yy);

	/* Visit the impact square and its eight neighbors. */
	for (i = 0; i < 9; i++) {
		ex = xx + Dirs[0][i];
		ey = yy + Dirs[1][i];
		if ((ex == Player.x) && (ey == Player.y)) {
			switch (dtype) {
			case FLAME:
				mprint("You were blasted by a fireball!");
				p_damage(random_range(dmg), FLAME,
				    "a fireball");
				break;
			case COLD:
				mprint("You were blasted by a snowball!");
				p_damage(random_range(dmg), COLD,
				    "a snowball");
				break;
			case ELECTRICITY:
				mprint("You were blasted by ball lightning!");
				p_damage(random_range(dmg), ELECTRICITY,
				    "ball lightning");
				break;
			case UNSTOPPABLE:
				mprint("Oh No! Manastorm!");
				p_damage(random_range(dmg), UNSTOPPABLE,
				    "a manastorm!");
				break;
			}
		}
		if (NULL != (target = Level->site[ex][ey].creature)) {
			/* Message only if the player can see the victim. */
			if (los_p(Player.x, Player.y, target->x, target->y)) {
				/* "The <monster>" (uniques omit "The "). */
				if (target->uniqueness == COMMON) {
					strcpy(Str1, "The ");
					strcat(Str1, target->monstring);
				} else
					strcpy(Str1, target->monstring);
				switch (dtype) {
				case FLAME:
					strcat(Str1,
					    " was zorched by a fireball!");
					break;
				case COLD:
					strcat(Str1,
					    " was blasted by a snowball!");
					break;
				case ELECTRICITY:
					strcat(Str1,
					    " was zapped by ball lightning!");
					break;
				case UNSTOPPABLE:
					strcat(Str1,
					    " was nuked by a manastorm!");
					break;
				}
				mprint(Str1);
			}
			m_status_set(target, HOSTILE);
			m_damage(target, random_range(dmg), dtype);
		}
		if (Level->site[ex][ey].locchar == HEDGE)
			/* Fire or lightning clears hedges (not trifids). */
			if (Level->site[ex][ey].p_locf != L_TRIFID) {
				if ((dtype == FLAME) ||
				    (dtype == ELECTRICITY)) {
					mprint("The hedge is blasted away!");
					Level->site[ex][ey].p_locf = L_NO_OP;
					Level->site[ex][ey].locchar = FLOOR;
					plotspot(ex, ey, TRUE);
					lset(ex, ey, CHANGED);
				} else
					mprint("The hedge is unaffected.");
			} else
				mprint("The trifid absorbs the energy and laughs!");
		else if (Level->site[ex][ey].locchar == WATER)
			if (dtype == FLAME) {
				mprint("The water is vaporised!");
				Level->site[ex][ey].p_locf = L_NO_OP;
				Level->site[ex][ey].locchar = FLOOR;
				plotspot(ex, ey, TRUE);
				lset(ex, ey, CHANGED);
			}
	}
}
/*
 * ucomopen: open entry point for a USB serial (ucom) tty.
 *
 * On first open (device not TS_ISOPEN and no waiting openers) this
 * performs full bring-up: calls the subdriver's ucom_open hook, resets
 * PPS capture state, programs default termios (plus TIOCSFLAGS sticky
 * bits), asserts DTR/RTS, opens the bulk-in/out pipes, and allocates
 * and queues the input/output transfer buffers.  Concurrent opens are
 * serialized through sc_opening (tsleep/wakeup).
 *
 * Returns 0 on success or an errno; on failure all partially acquired
 * resources are released through the fail_0/1/2 and bad goto chains.
 */
int
ucomopen(dev_t dev, int flag, int mode, struct lwp *l)
{
	int unit = UCOMUNIT(dev);
	usbd_status err;
	struct ucom_softc *sc = device_lookup_private(&ucom_cd, unit);
	struct ucom_buffer *ub;
	struct tty *tp;
	int s, i;
	int error;

	if (sc == NULL)
		return (ENXIO);

	if (sc->sc_dying)
		return (EIO);

	if (!device_is_active(sc->sc_dev))
		return (ENXIO);

	tp = sc->sc_tty;

	DPRINTF(("ucomopen: unit=%d, tp=%p\n", unit, tp));

	if (kauth_authorize_device_tty(l->l_cred, KAUTH_DEVICE_TTY_OPEN, tp))
		return (EBUSY);

	s = spltty();

	/*
	 * Do the following iff this is a first open.
	 */
	/* serialize concurrent first-opens on sc_opening */
	while (sc->sc_opening)
		tsleep(&sc->sc_opening, PRIBIO, "ucomop", 0);

	/* device may have been yanked while we slept */
	if (sc->sc_dying) {
		splx(s);
		return (EIO);
	}
	sc->sc_opening = 1;

	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		struct termios t;

		tp->t_dev = dev;

		/* give the hardware subdriver first crack at the open */
		if (sc->sc_methods->ucom_open != NULL) {
			error = sc->sc_methods->ucom_open(sc->sc_parent,
			    sc->sc_portno);
			if (error) {
				ucom_cleanup(sc);
				sc->sc_opening = 0;
				wakeup(&sc->sc_opening);
				splx(s);
				return (error);
			}
		}

		ucom_status_change(sc);

		/* Clear PPS capture state on first open. */
		mutex_spin_enter(&timecounter_lock);
		memset(&sc->sc_pps_state, 0, sizeof(sc->sc_pps_state));
		sc->sc_pps_state.ppscap = PPS_CAPTUREASSERT | PPS_CAPTURECLEAR;
		pps_init(&sc->sc_pps_state);
		mutex_spin_exit(&timecounter_lock);

		/*
		 * Initialize the termios status to the defaults.  Add in the
		 * sticky bits from TIOCSFLAGS.
		 */
		t.c_ispeed = 0;
		t.c_ospeed = TTYDEF_SPEED;
		t.c_cflag = TTYDEF_CFLAG;
		if (ISSET(sc->sc_swflags, TIOCFLAG_CLOCAL))
			SET(t.c_cflag, CLOCAL);
		if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS))
			SET(t.c_cflag, CRTSCTS);
		if (ISSET(sc->sc_swflags, TIOCFLAG_MDMBUF))
			SET(t.c_cflag, MDMBUF);
		/* Make sure ucomparam() will do something. */
		tp->t_ospeed = 0;
		(void) ucomparam(tp, &t);
		tp->t_iflag = TTYDEF_IFLAG;
		tp->t_oflag = TTYDEF_OFLAG;
		tp->t_lflag = TTYDEF_LFLAG;
		ttychars(tp);
		ttsetwater(tp);

		/*
		 * Turn on DTR.  We must always do this, even if carrier is not
		 * present, because otherwise we'd have to use TIOCSDTR
		 * immediately after setting CLOCAL, which applications do not
		 * expect.  We always assert DTR while the device is open
		 * unless explicitly requested to deassert it.  Ditto RTS.
		 */
		ucom_dtr(sc, 1);
		ucom_rts(sc, 1);

		DPRINTF(("ucomopen: open pipes in=%d out=%d\n",
		    sc->sc_bulkin_no, sc->sc_bulkout_no));

		/* Open the bulk pipes */
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkin_no,
		    USBD_EXCLUSIVE_USE, &sc->sc_bulkin_pipe);
		if (err) {
			DPRINTF(("%s: open bulk in error (addr %d), err=%s\n",
			    device_xname(sc->sc_dev), sc->sc_bulkin_no,
			    usbd_errstr(err)));
			error = EIO;
			goto fail_0;
		}
		err = usbd_open_pipe(sc->sc_iface, sc->sc_bulkout_no,
		    USBD_EXCLUSIVE_USE, &sc->sc_bulkout_pipe);
		if (err) {
			DPRINTF(("%s: open bulk out error (addr %d), err=%s\n",
			    device_xname(sc->sc_dev), sc->sc_bulkout_no,
			    usbd_errstr(err)));
			error = EIO;
			goto fail_1;
		}

		sc->sc_rx_unblock = 0;
		sc->sc_rx_stopped = 0;
		sc->sc_tx_stopped = 0;

		memset(sc->sc_ibuff, 0, sizeof(sc->sc_ibuff));
		memset(sc->sc_obuff, 0, sizeof(sc->sc_obuff));

		SIMPLEQ_INIT(&sc->sc_ibuff_empty);
		SIMPLEQ_INIT(&sc->sc_ibuff_full);
		SIMPLEQ_INIT(&sc->sc_obuff_free);
		SIMPLEQ_INIT(&sc->sc_obuff_full);

		/* Allocate input buffers */
		/* each input buffer is immediately armed with a read request */
		for (ub = &sc->sc_ibuff[0]; ub != &sc->sc_ibuff[UCOM_IN_BUFFS];
		    ub++) {
			ub->ub_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (ub->ub_xfer == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			ub->ub_data = usbd_alloc_buffer(ub->ub_xfer,
			    sc->sc_ibufsizepad);
			if (ub->ub_data == NULL) {
				error = ENOMEM;
				goto fail_2;
			}

			if (ucomsubmitread(sc, ub) != USBD_NORMAL_COMPLETION) {
				error = EIO;
				goto fail_2;
			}
		}

		/* output buffers start on the free list, submitted on demand */
		for (ub = &sc->sc_obuff[0]; ub != &sc->sc_obuff[UCOM_OUT_BUFFS];
		    ub++) {
			ub->ub_xfer = usbd_alloc_xfer(sc->sc_udev);
			if (ub->ub_xfer == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			ub->ub_data = usbd_alloc_buffer(ub->ub_xfer,
			    sc->sc_obufsize);
			if (ub->ub_data == NULL) {
				error = ENOMEM;
				goto fail_2;
			}
			SIMPLEQ_INSERT_TAIL(&sc->sc_obuff_free, ub, ub_link);
		}
	}
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);

	error = ttyopen(tp, UCOMDIALOUT(dev), ISSET(flag, O_NONBLOCK));
	if (error)
		goto bad;

	error = (*tp->t_linesw->l_open)(dev, tp);
	if (error)
		goto bad;

	return (0);

fail_2:
	/* tear down any buffers allocated above, then both pipes */
	usbd_abort_pipe(sc->sc_bulkin_pipe);
	for (i = 0; i < UCOM_IN_BUFFS; i++) {
		if (sc->sc_ibuff[i].ub_xfer != NULL) {
			usbd_free_xfer(sc->sc_ibuff[i].ub_xfer);
			sc->sc_ibuff[i].ub_xfer = NULL;
			sc->sc_ibuff[i].ub_data = NULL;
		}
	}
	usbd_abort_pipe(sc->sc_bulkout_pipe);
	for (i = 0; i < UCOM_OUT_BUFFS; i++) {
		if (sc->sc_obuff[i].ub_xfer != NULL) {
			usbd_free_xfer(sc->sc_obuff[i].ub_xfer);
			sc->sc_obuff[i].ub_xfer = NULL;
			sc->sc_obuff[i].ub_data = NULL;
		}
	}
	usbd_close_pipe(sc->sc_bulkout_pipe);
	sc->sc_bulkout_pipe = NULL;
fail_1:
	usbd_close_pipe(sc->sc_bulkin_pipe);
	sc->sc_bulkin_pipe = NULL;
fail_0:
	sc->sc_opening = 0;
	wakeup(&sc->sc_opening);
	splx(s);
	return (error);

bad:
	/* tty-layer open failed after hardware setup succeeded */
	s = spltty();
	CLR(tp->t_state, TS_BUSY);
	if (!ISSET(tp->t_state, TS_ISOPEN) && tp->t_wopen == 0) {
		/*
		 * We failed to open the device, and nobody else had it opened.
		 * Clean up the state as appropriate.
		 */
		ucom_cleanup(sc);
	}
	splx(s);

	return (error);
}
INLINE void CALCULATE_Z_FLAG(void) { if (R.ALU == 0) SET(Z_FLAG); else CLR(Z_FLAG); }
/* * Block write. Described in Bach (p.56) */ int bwrite(struct buf *bp) { int rv, async, wasdelayed, s; struct vnode *vp; struct mount *mp; /* * Remember buffer type, to switch on it later. If the write was * synchronous, but the file system was mounted with MNT_ASYNC, * convert it to a delayed write. * XXX note that this relies on delayed tape writes being converted * to async, not sync writes (which is safe, but ugly). */ async = ISSET(bp->b_flags, B_ASYNC); if (!async && bp->b_vp && bp->b_vp->v_mount && ISSET(bp->b_vp->v_mount->mnt_flag, MNT_ASYNC)) { bdwrite(bp); return (0); } /* * Collect statistics on synchronous and asynchronous writes. * Writes to block devices are charged to their associated * filesystem (if any). */ if ((vp = bp->b_vp) != NULL) { if (vp->v_type == VBLK) mp = vp->v_specmountpoint; else mp = vp->v_mount; if (mp != NULL) { if (async) mp->mnt_stat.f_asyncwrites++; else mp->mnt_stat.f_syncwrites++; } } wasdelayed = ISSET(bp->b_flags, B_DELWRI); CLR(bp->b_flags, (B_READ | B_DONE | B_ERROR | B_DELWRI)); s = splbio(); /* * If not synchronous, pay for the I/O operation and make * sure the buf is on the correct vnode queue. We have * to do this now, because if we don't, the vnode may not * be properly notified that its I/O has completed. */ if (wasdelayed) { reassignbuf(bp); } else curproc->p_stats->p_ru.ru_oublock++; /* Initiate disk write. Make sure the appropriate party is charged. */ bp->b_vp->v_numoutput++; splx(s); SET(bp->b_flags, B_WRITEINPROG); VOP_STRATEGY(bp); if (async) return (0); /* * If I/O was synchronous, wait for it to complete. */ rv = biowait(bp); /* Release the buffer. */ brelse(bp); return (rv); }
/* * Routine: macx_swapon * Function: * Syscall interface to add a file to backing store */ int macx_swapon( struct macx_swapon_args *args) { int size = args->size; vnode_t vp = (vnode_t)NULL; struct nameidata nd, *ndp; register int error; kern_return_t kr; mach_port_t backing_store; memory_object_default_t default_pager; int i; boolean_t funnel_state; off_t file_size; vfs_context_t ctx = vfs_context_current(); struct proc *p = current_proc(); int dp_cluster_size; AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON); AUDIT_ARG(value32, args->priority); funnel_state = thread_funnel_set(kernel_flock, TRUE); ndp = &nd; if ((error = suser(kauth_cred_get(), 0))) goto swapon_bailout; /* * Get a vnode for the paging area. */ NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32), (user_addr_t) args->filename, ctx); if ((error = namei(ndp))) goto swapon_bailout; nameidone(ndp); vp = ndp->ni_vp; if (vp->v_type != VREG) { error = EINVAL; goto swapon_bailout; } /* get file size */ if ((error = vnode_size(vp, &file_size, ctx)) != 0) goto swapon_bailout; #if CONFIG_MACF vnode_lock(vp); error = mac_system_check_swapon(vfs_context_ucred(ctx), vp); vnode_unlock(vp); if (error) goto swapon_bailout; #endif /* resize to desired size if it's too small */ if ((file_size < (off_t)size) && ((error = vnode_setsize(vp, (off_t)size, 0, ctx)) != 0)) goto swapon_bailout; #if CONFIG_PROTECT { /* initialize content protection keys manually */ if ((error = cp_handle_vnop(vp, CP_WRITE_ACCESS, 0)) != 0) { goto swapon_bailout; } } #endif if (default_pager_init_flag == 0) { start_def_pager(NULL); default_pager_init_flag = 1; } /* add new backing store to list */ i = 0; while(bs_port_table[i].vp != 0) { if(i == MAX_BACKING_STORE) break; i++; } if(i == MAX_BACKING_STORE) { error = ENOMEM; goto swapon_bailout; } /* remember the vnode. 
This vnode has namei() reference */ bs_port_table[i].vp = vp; /* * Look to see if we are already paging to this file. */ /* make certain the copy send of kernel call will work */ default_pager = MEMORY_OBJECT_DEFAULT_NULL; kr = host_default_memory_manager(host_priv_self(), &default_pager, 0); if(kr != KERN_SUCCESS) { error = EAGAIN; bs_port_table[i].vp = 0; goto swapon_bailout; } #if CONFIG_EMBEDDED dp_cluster_size = 1 * PAGE_SIZE; #else if ((dp_isssd = vnode_pager_isSSD(vp)) == TRUE) { /* * keep the cluster size small since the * seek cost is effectively 0 which means * we don't care much about fragmentation */ dp_cluster_size = 2 * PAGE_SIZE; } else { /* * use the default cluster size */ dp_cluster_size = 0; } #endif kr = default_pager_backing_store_create(default_pager, -1, /* default priority */ dp_cluster_size, &backing_store); memory_object_default_deallocate(default_pager); if(kr != KERN_SUCCESS) { error = ENOMEM; bs_port_table[i].vp = 0; goto swapon_bailout; } /* Mark this vnode as being used for swapfile */ vnode_lock_spin(vp); SET(vp->v_flag, VSWAP); vnode_unlock(vp); /* * NOTE: we are able to supply PAGE_SIZE here instead of * an actual record size or block number because: * a: we do not support offsets from the beginning of the * file (allowing for non page size/record modulo offsets. * b: because allow paging will be done modulo page size */ kr = default_pager_add_file(backing_store, (vnode_ptr_t) vp, PAGE_SIZE, (int)(file_size/PAGE_SIZE)); if(kr != KERN_SUCCESS) { bs_port_table[i].vp = 0; if(kr == KERN_INVALID_ARGUMENT) error = EINVAL; else error = ENOMEM; /* This vnode is not to be used for swapfile */ vnode_lock_spin(vp); CLR(vp->v_flag, VSWAP); vnode_unlock(vp); goto swapon_bailout; } bs_port_table[i].bs = (void *)backing_store; error = 0; ubc_setthreadcred(vp, p, current_thread()); /* * take a long term reference on the vnode to keep * vnreclaim() away from this vnode. 
*/ vnode_ref(vp); swapon_bailout: if (vp) { vnode_put(vp); } (void) thread_funnel_set(kernel_flock, FALSE); AUDIT_MACH_SYSCALL_EXIT(error); if (error) printf("macx_swapon FAILED - %d\n", error); else printf("macx_swapon SUCCESS\n"); return(error); }
static void eint(void) { CLR(INTM_FLAG); }
/*
 * Routine:	macx_swapoff
 * Function:
 *	Syscall interface to remove a file from backing store
 *
 *	Looks up args->filename, finds its slot in bs_port_table, and asks
 *	the default pager to delete the backing store (under IOPOL_THROTTLE
 *	disk policy).  On success the VSWAP flag and macx_swapon()'s
 *	long-term vnode reference are dropped.  Returns 0 or an errno.
 */
int
macx_swapoff(
	struct macx_swapoff_args *args)
{
	__unused int flags = args->flags;
	kern_return_t kr;
	mach_port_t backing_store;

	struct vnode *vp = 0;
	struct nameidata nd, *ndp;
	struct proc *p = current_proc();
	int i;
	int error;
	boolean_t funnel_state;
	vfs_context_t ctx = vfs_context_current();
	struct uthread *ut;
	int orig_iopol_disk;

	AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPOFF);

	funnel_state = thread_funnel_set(kernel_flock, TRUE);
	backing_store = NULL;
	ndp = &nd;

	/* removing backing store is root-only */
	if ((error = suser(kauth_cred_get(), 0)))
		goto swapoff_bailout;

	/*
	 * Get the vnode for the paging area.
	 */
	NDINIT(ndp, LOOKUP, OP_LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1,
	       ((IS_64BIT_PROCESS(p)) ? UIO_USERSPACE64 : UIO_USERSPACE32),
	       (user_addr_t) args->filename, ctx);

	if ((error = namei(ndp)))
		goto swapoff_bailout;
	nameidone(ndp);
	vp = ndp->ni_vp;

	if (vp->v_type != VREG) {
		error = EINVAL;
		goto swapoff_bailout;
	}
#if CONFIG_MACF
	vnode_lock(vp);
	error = mac_system_check_swapoff(vfs_context_ucred(ctx), vp);
	vnode_unlock(vp);
	if (error)
		goto swapoff_bailout;
#endif

	/* find the table slot registered by macx_swapon() for this vnode */
	for(i = 0; i < MAX_BACKING_STORE; i++) {
		if(bs_port_table[i].vp == vp) {
			break;
		}
	}
	if (i == MAX_BACKING_STORE) {
		error = EINVAL;
		goto swapoff_bailout;
	}
	backing_store = (mach_port_t)bs_port_table[i].bs;

	/* throttle this thread's disk I/O while the pager drains the store */
	ut = get_bsdthread_info(current_thread());
	orig_iopol_disk = proc_get_thread_selfdiskacc();
	proc_apply_thread_selfdiskacc(IOPOL_THROTTLE);
	kr = default_pager_backing_store_delete(backing_store);
	proc_apply_thread_selfdiskacc(orig_iopol_disk);

	switch (kr) {
		case KERN_SUCCESS:
			error = 0;
			bs_port_table[i].vp = 0;
			/* This vnode is no longer used for swapfile */
			vnode_lock_spin(vp);
			CLR(vp->v_flag, VSWAP);
			vnode_unlock(vp);

			/* get rid of macx_swapon() "long term" reference */
			vnode_rele(vp);

			break;
		case KERN_FAILURE:
			error = EAGAIN;
			break;
		default:
			error = EAGAIN;
			break;
	}

swapoff_bailout:
	/* get rid of macx_swapoff() namei() reference */
	if (vp)
		vnode_put(vp);
	(void) thread_funnel_set(kernel_flock, FALSE);
	AUDIT_MACH_SYSCALL_EXIT(error);

	if (error)
		printf("macx_swapoff FAILED - %d\n", error);
	else
		printf("macx_swapoff SUCCESS\n");

	return(error);
}
/*
 * nget_9p: get (or create) the vnode for the 9P object identified by qid.
 *
 * First searches the per-mount hash chain for an existing node with the
 * same qid.path, waiting out nodes that are mid-initialization
 * (NODE_INIT) or mid-reclaim (NODE_RECL) and retrying from "loop".  On
 * a miss, allocates a new node_9p (fid must be valid), fetches its
 * directory entry with ngetdir_9p(), and creates the vnode.
 *
 * On success *vpp holds the vnode and the node is returned locked
 * exclusive (nlock_9p).  Returns 0 or an errno; on failure the
 * partially built node is unhashed, any NODE_WAITINIT waiters are
 * woken, and its resources are freed.
 */
__private_extern__ int
nget_9p(mount_9p *nmp, fid_9p fid, qid_9p qid, vnode_t dvp, vnode_t *vpp, struct componentname *cnp, vfs_context_t ctx)
{
#pragma unused(ctx)
	struct vnode_fsparam fsp;
	struct hnode_9p *nhp;
	node_9p *np;
	uint32_t vid;
	int e, i;

	TRACE();
	nhp = HASH9P(nmp, qid.path);
loop:
	lck_mtx_lock(nmp->nodelck);
	LIST_FOREACH (np, nhp, next) {
		if(np->dir.qid.path != qid.path)
			continue;
		/* node being set up by another thread: wait and retry */
		if (ISSET(np->flags, NODE_INIT)) {
			SET(np->flags, NODE_WAITINIT);
			msleep(np, nmp->nodelck, PINOD|PDROP, "nget_9p_init", NULL);
			goto loop;
		}
		/* node being reclaimed: wait and retry */
		if (ISSET(np->flags, NODE_RECL)) {
			SET(np->flags, NODE_WAITRECL);
			msleep(np, nmp->nodelck, PINOD|PDROP, "nget_9p_reclaim", NULL);
			goto loop;
		}
		vid = vnode_vid(np->vp);
		lck_mtx_unlock(nmp->nodelck);
		/* vnode went away between unlock and get: start over */
		if (vnode_getwithvid(np->vp, vid))
			goto loop;

		nlock_9p(np, NODE_LCK_EXCLUSIVE);
		/* enter in the name cache only for stable entries (vers != 0) */
		if (dvp && cnp && ISSET(cnp->cn_flags, MAKEENTRY) && np->dir.qid.vers!=0) {
			// DEBUG("caching %s", np->dir->name);
			cache_enter(dvp, np->vp, cnp);
		} else {
			// DEBUG("not in cache qid=%d %s", qid.vers, np->dir->name);
		}

		*vpp = np->vp;
		return 0;
	}

	/* cache miss: we need a valid fid to build a new node */
	if (fid == NOFID)
		return EFAULT;

	np = malloc_9p(sizeof(*np));
	if (np == NULL) {
err0:
		lck_mtx_unlock(nmp->nodelck);
		return ENOMEM;
	}
	np->lck = lck_rw_alloc_init(lck_grp_9p, LCK_ATTR_NULL);
	if (np->lck == NULL) {
		free_9p(np);
		goto err0;
	}

	np->nmp = nmp;
	np->fid = fid;
	np->dir.qid = qid;
	for (i=0; i<3; i++)
		np->openfid[i].fid = NOFID;

	/* publish under NODE_INIT so racing lookups wait on us */
	SET(np->flags, NODE_INIT);
	LIST_INSERT_HEAD(nhp, np, next);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	lck_mtx_unlock(nmp->nodelck);

	if ((e=ngetdir_9p(np))) {
err1:
		/* undo publication and wake anyone waiting on NODE_INIT */
		nunlock_9p(np);
		lck_mtx_lock(nmp->nodelck);
		LIST_REMOVE(np, next);
		CLR(np->flags, NODE_INIT);
		if (ISSET(np->flags, NODE_WAITINIT)) {
			CLR(np->flags, NODE_WAITINIT);
			wakeup(np);
		}
		lck_mtx_unlock(nmp->nodelck);
		lck_rw_free(np->lck, lck_grp_9p);
		free_9p(np);
		return e;
	}

	fsp.vnfs_mp = nmp->mp;
	fsp.vnfs_str = fsname;
	fsp.vnfs_dvp = dvp;
	fsp.vnfs_fsnode = np;
	fsp.vnfs_vops = vnode_op_9p;
	fsp.vnfs_markroot = dvp==NULL? TRUE: FALSE;	/* no parent => fs root */
	fsp.vnfs_marksystem = FALSE;
	fsp.vnfs_filesize = np->dir.length;
	fsp.vnfs_cnp = cnp;
	fsp.vnfs_flags = VNFS_ADDFSREF;
	dirvtype_9p(&np->dir, ISSET(nmp->flags, F_DOTU), &fsp.vnfs_vtype, &fsp.vnfs_rdev);
	/* skip the name cache for unstable entries (vers == 0) */
	if (!dvp || !cnp || !ISSET(cnp->cn_flags, MAKEENTRY) || qid.vers==0)
		SET(fsp.vnfs_flags, VNFS_NOCACHE);

	if ((e=vnode_create(VNCREATE_FLAVOR, VCREATESIZE, &fsp, &np->vp)))
		goto err1;

	vnode_settag(np->vp, VT_OTHER);
	/* initialization complete: clear NODE_INIT and wake waiters */
	lck_mtx_lock(nmp->nodelck);
	CLR(np->flags, NODE_INIT);
	if (ISSET(np->flags, NODE_WAITINIT)) {
		CLR(np->flags, NODE_WAITINIT);
		wakeup(np);
	}
	lck_mtx_unlock(nmp->nodelck);
	*vpp = np->vp;
	return 0;
}
/*
 * yfts_read: return the next entry in the hierarchy traversal.
 *
 * Handles user instructions set via yfts_set (FTS_AGAIN, FTS_FOLLOW,
 * FTS_SKIP), pre-order descent into directories (via fts_build),
 * sibling advance, and post-order return to the parent.  Returns the
 * next FTSENT, or NULL at end of traversal (errno == 0) or on
 * unrecoverable error (FTS_STOP set).
 */
FTSENT *
yfts_read(FTS * sp) {
	FTSENT *p, *tmp;
	int instr;
	char *t;
	int saved_errno;

	ClearLastSystemError();

	/* If finished or unrecoverable error, return NULL. */
	if (sp->fts_cur == NULL || ISSET(FTS_STOP))
		return (NULL);

	/* Set current node pointer. */
	p = sp->fts_cur;

	/* Save and zero out user instructions. */
	instr = p->fts_instr;
	p->fts_instr = FTS_NOINSTR;

	/* Any type of file may be re-visited; re-stat and re-turn. */
	if (instr == FTS_AGAIN) {
		p->fts_info = fts_stat(sp, p, 0);
		return (p);
	}

	/*
	 * Following a symlink -- SLNONE test allows application to see
	 * SLNONE and recover.  If indirecting through a symlink, have to
	 * keep a pointer to current location.  If unable to get that
	 * pointer, follow fails.
	 */
	if (instr == FTS_FOLLOW &&
	    (p->fts_info == FTS_SL || p->fts_info == FTS_SLNONE)) {
		p->fts_info = fts_stat(sp, p, 1);
		if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
			if (valid_dird(p->fts_symfd = get_cwdd())) {
				p->fts_errno = errno;
				p->fts_info = FTS_ERR;
			} else
				p->fts_flags |= FTS_SYMFOLLOW;
		}
		return (p);
	}

	/* Directory in pre-order. */
	if (p->fts_info == FTS_D) {
		/* If skipped or crossed mount point, do post-order visit. */
		if (instr == FTS_SKIP ||
		    (ISSET(FTS_XDEV) && p->fts_dev != sp->fts_dev)) {
			if (p->fts_flags & FTS_SYMFOLLOW)
				close_dird(p->fts_symfd);
			if (sp->fts_child) {
				fts_lfree(sp->fts_child);
				sp->fts_child = NULL;
			}
			p->fts_info = FTS_DP;
			return (p);
		}

		/* Rebuild if only read the names and now traversing. */
		if (sp->fts_child && ISSET(FTS_NAMEONLY)) {
			CLR(FTS_NAMEONLY);
			fts_lfree(sp->fts_child);
			sp->fts_child = NULL;
		}

		/*
		 * Cd to the subdirectory.
		 *
		 * If have already read and now fail to chdir, whack the list
		 * to make the names come out right, and set the parent errno
		 * so the application will eventually get an error condition.
		 * Set the FTS_DONTCHDIR flag so that when we logically change
		 * directories back to the parent we don't do a chdir.
		 *
		 * If haven't read do so.  If the read fails, fts_build sets
		 * FTS_STOP or the fts_info field of the node.
		 */
		if (sp->fts_child) {
			if (fts_safe_changedir(sp, p, -1, p->fts_accpath)) {
				p->fts_errno = errno;
				p->fts_flags |= FTS_DONTCHDIR;
				/* fix up children to be reachable without chdir */
				for (p = sp->fts_child; p; p = p->fts_link)
					p->fts_accpath = p->fts_parent->fts_accpath;
			}
		} else if ((sp->fts_child = fts_build(sp, BREAD)) == NULL) {
			if (ISSET(FTS_STOP))
				return (NULL);
			return (p);
		}
		p = sp->fts_child;
		sp->fts_child = NULL;
		goto name;
	}

	/* Move to the next node on this level. */
next:
	tmp = p;
	if ((p = p->fts_link) != 0) {
		free(tmp);

		/*
		 * If reached the top, return to the original directory (or
		 * the root of the tree), and load the paths for the next root.
		 */
		if (p->fts_level == FTS_ROOTLEVEL) {
			if (FCHDIR(sp, sp->fts_rfd)) {
				SET(FTS_STOP);
				return (NULL);
			}
			fts_load(sp, p);
			return (sp->fts_cur = p);
		}

		/*
		 * User may have called yfts_set on the node.  If skipped,
		 * ignore.  If followed, get a file descriptor so we can
		 * get back if necessary.
		 */
		if (p->fts_instr == FTS_SKIP)
			goto next;
		if (p->fts_instr == FTS_FOLLOW) {
			p->fts_info = fts_stat(sp, p, 1);
			if (p->fts_info == FTS_D && !ISSET(FTS_NOCHDIR)) {
				if (valid_dird(p->fts_symfd = get_cwdd())) {
					p->fts_errno = errno;
					p->fts_info = FTS_ERR;
				} else
					p->fts_flags |= FTS_SYMFOLLOW;
			}
			p->fts_instr = FTS_NOINSTR;
		}

name:
		/* append this entry's name to the shared path buffer */
		t = sp->fts_path + NAPPEND(p->fts_parent);
		*t++ = LOCSLASH_C;
		memmove(t, p->fts_name, (size_t)p->fts_namelen + 1);
		return (sp->fts_cur = p);
	}

	/* Move up to the parent node. */
	p = tmp->fts_parent;
	free(tmp);

	if (p->fts_level == FTS_ROOTPARENTLEVEL) {
		/*
		 * Done; free everything up and set errno to 0 so the user
		 * can distinguish between error and EOF.
		 */
		free(p);
		errno = 0;
		return (sp->fts_cur = NULL);
	}

	/* NUL terminate the pathname. */
	sp->fts_path[p->fts_pathlen] = '\0';

	/*
	 * Return to the parent directory.  If at a root node or came through
	 * a symlink, go back through the file descriptor.  Otherwise, cd up
	 * one directory.
	 */
	if (p->fts_level == FTS_ROOTLEVEL) {
		if (FCHDIR(sp, sp->fts_rfd)) {
			SET(FTS_STOP);
			return (NULL);
		}
	} else if (p->fts_flags & FTS_SYMFOLLOW) {
		if (FCHDIR(sp, p->fts_symfd)) {
			/* preserve the FCHDIR errno across close_dird() */
			saved_errno = errno;
			close_dird(p->fts_symfd);
			errno = saved_errno;
			SET(FTS_STOP);
			return (NULL);
		}
		close_dird(p->fts_symfd);
	} else if (!(p->fts_flags & FTS_DONTCHDIR) &&
	    fts_safe_changedir(sp, p->fts_parent, -1, "..")) {
		SET(FTS_STOP);
		return (NULL);
	}
	p->fts_info = p->fts_errno ? FTS_ERR : FTS_DP;
	return (sp->fts_cur = p);
}
static int at91usart_param(struct tty *tp, struct termios *t) { struct at91usart_softc *sc = device_lookup_private(&at91usart_cd, COMUNIT(tp->t_dev)); int s; if (COM_ISALIVE(sc) == 0) return (EIO); if (t->c_ispeed && t->c_ispeed != t->c_ospeed) return (EINVAL); /* * For the console, always force CLOCAL and !HUPCL, so that the port * is always active. */ if (ISSET(sc->sc_swflags, TIOCFLAG_SOFTCAR) || ISSET(sc->sc_hwflags, COM_HW_CONSOLE)) { SET(t->c_cflag, CLOCAL); CLR(t->c_cflag, HUPCL); } /* * If there were no changes, don't do anything. This avoids dropping * input and improves performance when all we did was frob things like * VMIN and VTIME. */ if (tp->t_ospeed == t->c_ospeed && tp->t_cflag == t->c_cflag) return (0); s = spltty(); sc->sc_brgr = (AT91_MSTCLK / 16 + t->c_ospeed / 2) / t->c_ospeed; /* And copy to tty. */ tp->t_ispeed = 0; tp->t_ospeed = t->c_ospeed; tp->t_cflag = t->c_cflag; at91usart_set(sc); splx(s); /* * Update the tty layer's idea of the carrier bit. * We tell tty the carrier is always on. */ (void) (*tp->t_linesw->l_modem)(tp, 1); #ifdef COM_DEBUG if (com_debug) comstatus(sc, "comparam "); #endif /* tell the upper layer about hwflow.. */ if (sc->hwflow) (*sc->hwflow)(sc, t->c_cflag); return (0); }
KEY(UNDO,"UNDO KEY"), KEY(MOUSE,"MOUSE EVENT HAS OCCURRED"), KEY(RESIZE,"TERMINAL RESIZE EVENT"), KEY(EVENT,"WE WERE INTERRUPTED BY AN EVENT"), KEY(IDLE,"IDLE/TIMEOUT EVENT"), KEY(MAX,"MAXIMUM KEY VALUE"), { 0, 0 } }; #define CLR(colour) { clr_##colour, (const char *) (#colour) } static struct { unsigned colour; const char *name; } colours[] = { CLR(BLACK), CLR(RED), CLR(GREEN), CLR(YELLOW), CLR(BLUE), CLR(MAGENTA), CLR(CYAN), CLR(WHITE), { 0, 0 } }; #define ATR(attr) { CCA_##attr, (const char *) (#attr) } static struct { unsigned long attr; const char *name;
inline static void at91usart_rxsoft(struct at91usart_softc *sc, struct tty *tp, unsigned csr) { u_char *start, *get, *end; int cc; AT91PDC_FIFO_POSTREAD(sc->sc_iot, sc->sc_ioh, sc->sc_dmat, US_PDC, &sc->sc_rx_fifo); if (ISSET(csr, US_CSR_TIMEOUT | US_CSR_RXBRK)) at91usart_rx_stopped(sc); while ((start = AT91PDC_FIFO_RDPTR(&sc->sc_rx_fifo, &cc)) != NULL) { int (*rint)(int, struct tty *) = tp->t_linesw->l_rint; int code; if (!ISSET(csr, US_CSR_TIMEOUT | US_CSR_RXBRK)) at91usart_rx_started(sc); for (get = start, end = start + cc; get < end; get++) { code = *get; if ((*rint)(code, tp) == -1) { /* * The line discipline's buffer is out of space. */ if (!ISSET(sc->sc_rx_flags, RX_TTY_BLOCKED)) { /* * We're either not using flow control, or the * line discipline didn't tell us to block for * some reason. Either way, we have no way to * know when there's more space available, so * just drop the rest of the data. */ get = end; printf("%s: receive missing data!\n", device_xname(sc->sc_dev)); } else { /* * Don't schedule any more receive processing * until the line discipline tells us there's * space available (through comhwiflow()). * Leave the rest of the data in the input * buffer. */ SET(sc->sc_rx_flags, RX_TTY_OVERFLOWED); } break; } } // tell we've read some bytes... AT91PDC_FIFO_READ(&sc->sc_rx_fifo, get - start); if (ISSET(sc->sc_rx_flags, RX_TTY_BLOCKED)) break; } // h/w flow control hook: if (ISSET(sc->sc_swflags, TIOCFLAG_CRTSCTS)) at91usart_rx_rts_ctl(sc, (AT91PDC_FIFO_SPACE(&sc->sc_rx_fifo) > PDC_BLOCK_SIZE * 2)); // write next pointer if USART is ready: if (AT91PDC_FIFO_PREREAD(sc->sc_iot, sc->sc_ioh, sc->sc_dmat, US_PDC, &sc->sc_rx_fifo, PDC_BLOCK_SIZE)) { SET(sc->sc_ier, US_CSR_ENDRX | US_CSR_RXBUFF | US_CSR_TIMEOUT | US_CSR_RXBRK); } else { CLR(sc->sc_ier, US_CSR_ENDRX | US_CSR_RXBUFF | US_CSR_TIMEOUT | US_CSR_RXBRK); } }
void pic16c62x_device::CALCULATE_Z_FLAG() { if (m_ALU == 0) SET(STATUS, Z_FLAG); else CLR(STATUS, Z_FLAG); }
/* * Attempt to read a disk label from a device * using the indicated strategy routine. * The label must be partly set up before this: * secpercyl, secsize and anything required for a block i/o read * operation in the driver's strategy/start routines * must be filled in before calling us. */ int readdisklabel(dev_t dev, void (*strat)(struct buf *), struct disklabel *lp, int spoofonly) { struct sun_disklabel *slp; struct buf *bp = NULL; int error; if ((error = initdisklabel(lp))) goto done; lp->d_flags |= D_VENDOR; /* * On sparc64 we check for a CD label first, because our * CD install media contains both sparc & sparc64 labels. * We want the sparc64 machine to find the "CD label", not * the SunOS label, for loading its kernel. */ #if NCD > 0 if (strat == cdstrategy) { #if defined(CD9660) if ((error = iso_disklabelspoof(dev, strat, lp)) == 0) goto done; #endif #if defined(UDF) if ((error = udf_disklabelspoof(dev, strat, lp)) == 0) goto done; #endif } #endif /* NCD > 0 */ /* get buffer and initialize it */ bp = geteblk((int)lp->d_secsize); bp->b_dev = dev; if (spoofonly) goto doslabel; bp->b_blkno = LABELSECTOR; bp->b_bcount = lp->d_secsize; CLR(bp->b_flags, B_READ | B_WRITE | B_DONE); SET(bp->b_flags, B_BUSY | B_READ | B_RAW); (*strat)(bp); if (biowait(bp)) { error = bp->b_error; goto done; } slp = (struct sun_disklabel *)bp->b_data; if (slp->sl_magic == SUN_DKMAGIC) { error = disklabel_sun_to_bsd(slp, lp); goto done; } error = checkdisklabel(bp->b_data + LABELOFFSET, lp, 0, DL_GETDSIZE(lp)); if (error == 0) goto done; doslabel: error = readdoslabel(bp, strat, lp, NULL, spoofonly); if (error == 0) goto done; /* A CD9660/UDF label may be on a non-CD drive, so recheck */ #if defined(CD9660) error = iso_disklabelspoof(dev, strat, lp); if (error == 0) goto done; #endif #if defined(UDF) error = udf_disklabelspoof(dev, strat, lp); if (error == 0) goto done; #endif done: if (bp) { bp->b_flags |= B_INVAL; brelse(bp); } disk_change = 1; return (error); }
/** \fn open
 *
 * Open and parse an AVI/OpenDML file.
 *
 * Validates the RIFF/AVI signature, walks the header chunks, loads the
 * video stream header and BITMAPINFO (plus codec extradata), reads all
 * audio stream headers/WAV headers, builds the frame index (regular
 * idx1 or OpenDML indx), and creates the audio stream accessors.
 *
 * Returns the index-builder result (non-zero on success, 0 on failure).
 *
 * NOTE(review): the fread() calls below do not check their return
 * values; a truncated file would leave the headers partially filled.
 */
uint8_t OpenDMLHeader::open(const char *name)
{
    uint8_t badAvi=0;
    uint32_t rd;

    printf("** opening OpenDML files **");
    _fd=ADM_fopen(name,"rb");
    if(!_fd)
    {
        printf("\n cannot open %s \n",name);
        return 0;
    }
    myName=ADM_strdup(name);
// zero out a struct-typed member
#define CLR(x) memset(& x,0,sizeof( x));
    CLR( _videostream);
    CLR( _mainaviheader);
    _isvideopresent=1;
    _isaudiopresent=0;
    _nbTrack=0;

    // validate the RIFF....AVI  signature
    riffParser *parser=new riffParser(name);
    if(MKFCC('R','I','F','F')!=(rd=parser->read32()))
    {
        printf("Not riff\n");badAvi=1;
        printf("%x != %x\n",rd,MKFCC('R','I','F','F'));
    }
    parser->read32();   // skip the RIFF size field
    if(MKFCC('A','V','I',' ')!=parser->read32())
    {
        printf("Not Avi\n");badAvi=1;
    }
    if(!badAvi)
    {
        walk(parser);   // collect track/header chunk offsets
    }
    delete parser;
    aprintf("Found %d tracks\n:-----------\n",_nbTrack);
    // check if it looks like a correct avi
    if(!_nbTrack) badAvi=1;
    // if we are up to here -> good avi :)
    if(badAvi)
    {
        printf("FAIL\n");
        return 0;
    }
    // now read up each parts...
    //____________________________
#define DUMP_TRACK(i) aprintf(" at %"PRIu64" (%"PRIx64") size : %"PRIu64" (%"PRIx64")\n", \
        _Tracks[i].strh.offset,\
        _Tracks[i].strh.offset,\
        _Tracks[i].strh.size,\
        _Tracks[i].strh.size);
    for(uint32_t i=0;i<_nbTrack;i++)
    {
        DUMP_TRACK(i);
    }
    uint32_t vidTrack=0xff;
    // search wich track is the video one
    // and load it to _videoheader
    for(uint32_t i=0;i<_nbTrack;i++)
    {
        fseeko(_fd,_Tracks[i].strh.offset,SEEK_SET);
        if(_Tracks[i].strh.size!=sizeof(_videostream))
        {
            printf("[AVI]Mmm(1) we have a bogey here, size mismatch : %"PRIu64"\n",_Tracks[i].strh.size);
            printf("[AVI]expected %d\n",(int)sizeof(_videostream));
            if(_Tracks[i].strh.size<sizeof(_videostream)-8) // RECT is not mandatory
            {
                GUI_Error_HIG(QT_TR_NOOP("Malformed header"), NULL);
                return 0;
            }
            printf("[AVI]Trying to continue anyway\n");
        }
        fread(&_videostream,sizeof(_videostream),1,_fd);
#ifdef ADM_BIG_ENDIAN
        Endian_AviStreamHeader(&_videostream);
#endif
        if(_videostream.fccType==MKFCC('v','i','d','s'))
        {
            vidTrack=i;
            printf("Video track is %u\n",i);
            break;
        }
    }
    if(0xff==vidTrack)
    {
        printf("Could not identify video track!");
        return 0;
    }
    // then bih stuff
    int32_t extra;
    // _fd=fopen(name,"rb");
    fseeko(_fd,_Tracks[vidTrack].strf.offset,SEEK_SET);
    // anything beyond the BITMAPINFO is codec extradata
    extra=_Tracks[vidTrack].strf.size-sizeof(_video_bih);
    if(extra<0)
    {
        printf("[AVI]bih is not big enough (%"PRIu64"/%d)!\n",_Tracks[vidTrack].strf.size,(int)sizeof(_video_bih));
        return 0;
    }
    fread(&_video_bih,sizeof(_video_bih),1,_fd);
#ifdef ADM_BIG_ENDIAN
    Endian_BitMapInfo(&_video_bih);
#endif
    if(extra>0)
    {
        _videoExtraLen=extra;
        _videoExtraData=new uint8_t [extra];
        fread(_videoExtraData,extra,1,_fd);
    }
    _isvideopresent=1;
    //--------------------------------------------------
    // Read audio trak info, select if there is
    // several
    //--------------------------------------------------
    // and audio track
    if(_mainaviheader.dwStreams>=2)
    {
        // which one is the audio track, is there several ?
        if(!(_nbAudioTracks=countAudioTrack()))
        {
            printf("Weird, there is no audio track, but more than one stream...\n");
        }
        else
        {
            uint32_t run=0,audio=0;
            odmlAudioTrack *track;
            _audioTracks=new odmlAudioTrack[_nbAudioTracks];
            _audioStreams=new ADM_audioStream *[_nbAudioTracks];
            // run scans all tracks, audio counts the 'auds' ones found
            while(audio<_nbAudioTracks)
            {
                ADM_assert(run<_nbTrack);
                track=&(_audioTracks[audio]);
                fseeko(_fd,_Tracks[run].strh.offset,SEEK_SET);
                if(_Tracks[run].strh.size != sizeof(_audiostream))
                {
                    printf("[AVI]Mmm(2) we have a bogey here, size mismatch : %"PRIu64"\n",_Tracks[run].strh.size);
                    printf("[AVI]expected %d\n",(int)sizeof(_audiostream));
                    if(_Tracks[run].strh.size<sizeof(_audiostream)-8)
                    {
                        GUI_Error_HIG(QT_TR_NOOP("Malformed header"), NULL);
                        return 0;
                    }
                    printf("[AVI]Trying to continue anyway\n");
                }
                fread(track->avistream,sizeof(_audiostream),1,_fd);
#ifdef ADM_BIG_ENDIAN
                Endian_AviStreamHeader(track->avistream);
#endif
                if(track->avistream->fccType!=MKFCC('a','u','d','s'))
                {
                    printf("Not an audio track!\n");
                    run++;
                    continue;
                }
                // now read extra stuff
                fseeko(_fd,_Tracks[run].strf.offset,SEEK_SET);
                extra=_Tracks[run].strf.size-sizeof(WAVHeader);
                if(extra<0)
                {
                    printf("[AVI]WavHeader is not big enough (%"PRIu64"/%d)!\n", _Tracks[run].strf.size,(int)sizeof(WAVHeader));
                    return 0;
                }
                fread(track->wavHeader,sizeof(WAVHeader),1,_fd);
#ifdef ADM_BIG_ENDIAN
                Endian_WavHeader(track->wavHeader);
#endif
                if(extra>2)
                {
                    // skip the 2-byte cbSize field before the extradata
                    fgetc(_fd);fgetc(_fd);
                    extra-=2;
                    track->extraDataLen=extra;
                    track->extraData=new uint8_t [extra];
                    fread(track->extraData,extra,1,_fd);
                }
                track->trackNum=run;
                audio++;
                run++;
            }
        }
    }
    // now look at the index stuff
    // there could be 3 cases:
    // 1- It is a openDML index, meta index + several smaller index
    // 2- It is a legacy index (type 1 , most common)
    // 3- It is a broken index or no index at all
    //
    // If it is a openDML index we will find a "indx" field in the Tracks
    // Else we will find it in _regularIndex Track
    // Since openDML often also have a regular index we will try open DML first
    uint8_t ret=0;
    Dump();
    // take the size of riff header and actual file size
    uint64_t riffSize;
    fseeko(_fd,0,SEEK_END);
    _fileSize=ftello(_fd);
    fseeko(_fd,0,SEEK_SET);
    read32();
    riffSize=(uint64_t )read32();
    // 1st case, we have an avi < 4 Gb
    // potentially avi type 1
#if 0
    if((_fileSize<4*1024*1024*1024LL)&& // if riff size is ~ fileSize try regular index
       (abs(riffSize-_fileSize)<1024*1024))
#endif
// print whether a field is present
#define HAS(x) if(x) printf(#x" : yes\n"); else printf(#x" : no\n");
    // If there is no openDML index
    HAS( _regularIndex.offset);
    HAS( _Tracks[vidTrack].indx.offset);
    if(!ret && _regularIndex.offset &&!_Tracks[vidTrack].indx.offset) // try regular avi if a idx1 field is there (avi index)
        ret=indexRegular(vidTrack);
    if (!ret && _Tracks[vidTrack].indx.offset) // Try openDML if a index field is there (openDML)
        ret=indexODML(vidTrack);
    if(!ret)
    {
        printf("Could not index it properly...\n");
        return 0;
    }
    if(!_nbAudioTracks)
    {
        _isaudiopresent=0;
    }
    else
    {
        odmlAudioTrack *track;
        // Check it is not a weird DV file
        if(fourCC::check(_video_bih.biCompression,(uint8_t *)"dvsd"))
        {
            // DV-in-AVI sometimes carries an empty WAV header: patch in PCM defaults
            for(int i=0;i<_nbAudioTracks;i++)
            {
                track=&(_audioTracks[i]);
                WAVHeader *hdr=track->wavHeader;
                if(!hdr->frequency)
                {
                    ADM_warning("Fixing audio track to be PCM\n");
                    hdr->frequency=48000;
                    //hdr->channels=2;
                    hdr->byterate=48000*hdr->channels*2;
                    hdr->blockalign=2*hdr->channels;
                }
            }
        }
        // build audio stream
        for(int i=0;i<_nbAudioTracks;i++)
        {
            track=&(_audioTracks[i]);
            ADM_aviAudioAccess *access=new ADM_aviAudioAccess(track->index,track->wavHeader, track->nbChunks, myName, track->extraDataLen,track->extraData);
            _audioStreams[i]= ADM_audioCreateStream((track->wavHeader), access);
        }
    }
    // uncompressed RGB ("DIB ") may have biCompression == 0: normalize it
    if(!_video_bih.biCompression && fourCC::check(_videostream.fccHandler,(uint8_t*)"DIB "))
    {
        _videostream.fccHandler=_video_bih.biCompression=fourCC::get((uint8_t*)"DIB ");
    }
    else
        _videostream.fccHandler=_video_bih.biCompression;
    printf("\nOpenDML file successfully read..\n");
    if(ret==1)
    {
        computePtsDts();
        removeEmptyFrames();
    }
    ADM_info("PtsAvailable : %d\n",(int)ptsAvailable);
    return ret;
}