/*ARGSUSED*/
int
ksymsread(dev_t dev, struct uio *uio, int flags)
{
	int error;
	size_t len;
	caddr_t v;
	size_t off;

	if (uio->uio_offset < 0)
		return (EINVAL);

	while (uio->uio_resid > 0) {
		if (uio->uio_offset >= ksym_head_size + ksym_syms_size)
			break;

		if (uio->uio_offset < ksym_head_size) {
			v = ksym_head + uio->uio_offset;
			len = ksym_head_size - uio->uio_offset;
		} else {
			off = uio->uio_offset - ksym_head_size;
			v = ksym_syms + off;
			len = ksym_syms_size - off;
		}

		if (len > uio->uio_resid)
			len = uio->uio_resid;

		if ((error = uiomovei(v, len, uio)) != 0)
			return (error);
	}

	return (0);
}
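/*
 * A minimal userland sketch (not part of the driver) of how the
 * two-region layout above behaves from the consumer side: sequential
 * read(2)s of /dev/ksyms first drain the header region, then the
 * symbol region, and return 0 at the combined end. The device path
 * and buffer size here are assumptions for illustration.
 */
#include <fcntl.h>
#include <unistd.h>

int
read_ksyms(void)
{
	char buf[8192];
	ssize_t n;
	int fd = open("/dev/ksyms", O_RDONLY);

	if (fd == -1)
		return (-1);
	while ((n = read(fd, buf, sizeof(buf))) > 0)
		;	/* consume header bytes, then symbol bytes */
	close(fd);
	return (n == 0 ? 0 : -1);	/* 0 on clean EOF */
}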
int
nvramread(dev_t dev, struct uio *uio, int flags)
{
	u_char buf[NVRAM_SIZE];
	u_int pos = uio->uio_offset;
	u_char *tmp;
	int count = min(sizeof(buf), uio->uio_resid);
	int ret;

	if (!nvram_initialized)
		return (ENXIO);

	if (uio->uio_resid == 0)
		return (0);

#ifdef NVRAM_DEBUG
	printf("attempting to read %d bytes at offset %d\n", count, pos);
#endif

	for (tmp = buf; count-- > 0 && pos < NVRAM_SIZE; ++pos, ++tmp)
		*tmp = nvram_get_byte(pos);

#ifdef NVRAM_DEBUG
	printf("nvramread read %d bytes (%s)\n", (tmp - buf), tmp);
#endif

	ret = uiomovei((caddr_t)buf, (tmp - buf), uio);

	uio->uio_offset += uio->uio_resid;

	return (ret);
}
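/*
 * The core pattern above -- snapshot device bytes into a bounce
 * buffer, clamped to both the request size and the device size --
 * in isolation. A sketch with a hypothetical dev_size/dev_get_byte;
 * not driver code.
 */
#include <stddef.h>

size_t
bounded_copy(unsigned char *dst, size_t want, size_t off,
    size_t dev_size, unsigned char (*dev_get_byte)(size_t))
{
	size_t n = 0;

	/* Copy until the request is satisfied or the device ends. */
	while (n < want && off + n < dev_size) {
		dst[n] = dev_get_byte(off + n);
		n++;
	}
	return (n);	/* bytes actually produced, possibly short */
}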
int
pipe_write(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	int error = 0;
	int orig_resid;
	struct pipe *wpipe, *rpipe;

	rpipe = (struct pipe *) fp->f_data;
	wpipe = rpipe->pipe_peer;

	/*
	 * detect loss of pipe read side, issue SIGPIPE if lost.
	 */
	if ((wpipe == NULL) || (wpipe->pipe_state & PIPE_EOF)) {
		return (EPIPE);
	}
	++wpipe->pipe_busy;

	/*
	 * If it is advantageous to resize the pipe buffer, do so.
	 */
	if ((uio->uio_resid > PIPE_SIZE) &&
	    (nbigpipe < LIMITBIGPIPES) &&
	    (wpipe->pipe_buffer.size <= PIPE_SIZE) &&
	    (wpipe->pipe_buffer.cnt == 0)) {

		if ((error = pipelock(wpipe)) == 0) {
			if (pipespace(wpipe, BIG_PIPE_SIZE) == 0)
				nbigpipe++;
			pipeunlock(wpipe);
		}
	}

	/*
	 * If an early error occurred unbusy and return, waking up any pending
	 * readers.
	 */
	if (error) {
		--wpipe->pipe_busy;
		if ((wpipe->pipe_busy == 0) &&
		    (wpipe->pipe_state & PIPE_WANT)) {
			wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
			wakeup(wpipe);
		}
		return (error);
	}

	orig_resid = uio->uio_resid;

	while (uio->uio_resid) {
		int space;

retrywrite:
		if (wpipe->pipe_state & PIPE_EOF) {
			error = EPIPE;
			break;
		}

		space = wpipe->pipe_buffer.size - wpipe->pipe_buffer.cnt;

		/* Writes of size <= PIPE_BUF must be atomic. */
		if ((space < uio->uio_resid) && (orig_resid <= PIPE_BUF))
			space = 0;

		if (space > 0) {
			if ((error = pipelock(wpipe)) == 0) {
				int size;	/* Transfer size */
				int segsize;	/* first segment to transfer */

				/*
				 * If a process blocked in uiomove, our
				 * value for space might be bad.
				 *
				 * XXX will we be ok if the reader has gone
				 * away here?
				 */
				if (space > wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.cnt) {
					pipeunlock(wpipe);
					goto retrywrite;
				}

				/*
				 * Transfer size is minimum of uio transfer
				 * and free space in pipe buffer.
				 */
				if (space > uio->uio_resid)
					size = uio->uio_resid;
				else
					size = space;
				/*
				 * First segment to transfer is minimum of
				 * transfer size and contiguous space in
				 * pipe buffer.  If first segment to transfer
				 * is less than the transfer size, we've got
				 * a wraparound in the buffer.
				 */
				segsize = wpipe->pipe_buffer.size -
				    wpipe->pipe_buffer.in;
				if (segsize > size)
					segsize = size;

				/* Transfer first segment */
				error = uiomovei(&wpipe->pipe_buffer.buffer[wpipe->pipe_buffer.in],
				    segsize, uio);

				if (error == 0 && segsize < size) {
					/*
					 * Transfer remaining part now, to
					 * support atomic writes.  Wraparound
					 * happened.
					 */
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.in + segsize !=
					    wpipe->pipe_buffer.size)
						panic("Expected pipe buffer wraparound disappeared");
#endif

					error = uiomovei(&wpipe->pipe_buffer.buffer[0],
					    size - segsize, uio);
				}
				if (error == 0) {
					wpipe->pipe_buffer.in += size;
					if (wpipe->pipe_buffer.in >=
					    wpipe->pipe_buffer.size) {
#ifdef DIAGNOSTIC
						if (wpipe->pipe_buffer.in != size - segsize + wpipe->pipe_buffer.size)
							panic("Expected wraparound bad");
#endif
						wpipe->pipe_buffer.in = size -
						    segsize;
					}

					wpipe->pipe_buffer.cnt += size;
#ifdef DIAGNOSTIC
					if (wpipe->pipe_buffer.cnt > wpipe->pipe_buffer.size)
						panic("Pipe buffer overflow");
#endif
				}
				pipeunlock(wpipe);
			}
			if (error)
				break;
		} else {
			/*
			 * If the "read-side" has been blocked, wake it up now.
			 */
			if (wpipe->pipe_state & PIPE_WANTR) {
				wpipe->pipe_state &= ~PIPE_WANTR;
				wakeup(wpipe);
			}

			/*
			 * don't block on non-blocking I/O
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
				break;
			}

			/*
			 * We have no more space and have something to offer,
			 * wake up select/poll.
			 */
			pipeselwakeup(wpipe);

			wpipe->pipe_state |= PIPE_WANTW;
			error = tsleep(wpipe, (PRIBIO + 1)|PCATCH,
			    "pipewr", 0);
			if (error)
				break;
			/*
			 * If read side wants to go away, we just issue a
			 * signal to ourselves.
			 */
			if (wpipe->pipe_state & PIPE_EOF) {
				error = EPIPE;
				break;
			}
		}
	}

	--wpipe->pipe_busy;
	if ((wpipe->pipe_busy == 0) && (wpipe->pipe_state & PIPE_WANT)) {
		wpipe->pipe_state &= ~(PIPE_WANT | PIPE_WANTR);
		wakeup(wpipe);
	} else if (wpipe->pipe_buffer.cnt > 0) {
		/*
		 * If we have put any characters in the buffer, we wake up
		 * the reader.
		 */
		if (wpipe->pipe_state & PIPE_WANTR) {
			wpipe->pipe_state &= ~PIPE_WANTR;
			wakeup(wpipe);
		}
	}

	/*
	 * Don't return EPIPE if I/O was successful
	 */
	if ((wpipe->pipe_buffer.cnt == 0) &&
	    (uio->uio_resid == 0) &&
	    (error == EPIPE)) {
		error = 0;
	}

	if (error == 0)
		getnanotime(&wpipe->pipe_mtime);
	/*
	 * We have something to offer, wake up select/poll.
	 */
	if (wpipe->pipe_buffer.cnt)
		pipeselwakeup(wpipe);

	return (error);
}
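/*
 * A userland sketch (illustration only, not kernel code) of the two
 * write-side behaviors implemented above: a write no larger than
 * PIPE_BUF is atomic, and with O_NONBLOCK a full pipe yields EAGAIN
 * instead of sleeping in "pipewr".
 */
#include <errno.h>
#include <fcntl.h>
#include <limits.h>
#include <string.h>
#include <unistd.h>

int
fill_pipe_nonblock(int fds[2])
{
	char chunk[PIPE_BUF];	/* <= PIPE_BUF, so never interleaved */
	ssize_t n;

	memset(chunk, 0, sizeof(chunk));
	if (pipe(fds) == -1 ||
	    fcntl(fds[1], F_SETFL, O_NONBLOCK) == -1)
		return (-1);

	/* Write until the buffer is full; EAGAIN maps to space == 0. */
	for (;;) {
		n = write(fds[1], chunk, sizeof(chunk));
		if (n == -1)
			return (errno == EAGAIN ? 0 : -1);
	}
}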
/* ARGSUSED */
int
pipe_read(struct file *fp, off_t *poff, struct uio *uio, struct ucred *cred)
{
	struct pipe *rpipe = (struct pipe *) fp->f_data;
	int error;
	int nread = 0;
	int size;

	error = pipelock(rpipe);
	if (error)
		return (error);

	++rpipe->pipe_busy;

	while (uio->uio_resid) {
		/*
		 * normal pipe buffer receive
		 */
		if (rpipe->pipe_buffer.cnt > 0) {
			size = rpipe->pipe_buffer.size -
			    rpipe->pipe_buffer.out;
			if (size > rpipe->pipe_buffer.cnt)
				size = rpipe->pipe_buffer.cnt;
			if (size > uio->uio_resid)
				size = uio->uio_resid;
			error = uiomovei(&rpipe->pipe_buffer.buffer[rpipe->pipe_buffer.out],
			    size, uio);
			if (error) {
				break;
			}
			rpipe->pipe_buffer.out += size;
			if (rpipe->pipe_buffer.out >= rpipe->pipe_buffer.size)
				rpipe->pipe_buffer.out = 0;

			rpipe->pipe_buffer.cnt -= size;

			/*
			 * If there is no more to read in the pipe, reset
			 * its pointers to the beginning.  This improves
			 * cache hit stats.
			 */
			if (rpipe->pipe_buffer.cnt == 0) {
				rpipe->pipe_buffer.in = 0;
				rpipe->pipe_buffer.out = 0;
			}
			nread += size;
		} else {
			/*
			 * detect EOF condition
			 * read returns 0 on EOF, no need to set error
			 */
			if (rpipe->pipe_state & PIPE_EOF)
				break;

			/*
			 * If the "write-side" has been blocked, wake it up now.
			 */
			if (rpipe->pipe_state & PIPE_WANTW) {
				rpipe->pipe_state &= ~PIPE_WANTW;
				wakeup(rpipe);
			}

			/*
			 * Break if some data was read.
			 */
			if (nread > 0)
				break;

			/*
			 * Unlock the pipe buffer for our remaining processing.
			 * We will either break out with an error or we will
			 * sleep and relock to loop.
			 */
			pipeunlock(rpipe);

			/*
			 * Handle non-blocking mode operation or
			 * wait for more data.
			 */
			if (fp->f_flag & FNONBLOCK) {
				error = EAGAIN;
			} else {
				rpipe->pipe_state |= PIPE_WANTR;
				if ((error = tsleep(rpipe, PRIBIO|PCATCH,
				    "piperd", 0)) == 0)
					error = pipelock(rpipe);
			}
			if (error)
				goto unlocked_error;
		}
	}
	pipeunlock(rpipe);

	if (error == 0)
		getnanotime(&rpipe->pipe_atime);
unlocked_error:
	--rpipe->pipe_busy;

	/*
	 * PIPE_WANT processing only makes sense if pipe_busy is 0.
	 */
	if ((rpipe->pipe_busy == 0) && (rpipe->pipe_state & PIPE_WANT)) {
		rpipe->pipe_state &= ~(PIPE_WANT|PIPE_WANTW);
		wakeup(rpipe);
	} else if (rpipe->pipe_buffer.cnt < MINPIPESIZE) {
		/*
		 * Handle write blocking hysteresis.
		 */
		if (rpipe->pipe_state & PIPE_WANTW) {
			rpipe->pipe_state &= ~PIPE_WANTW;
			wakeup(rpipe);
		}
	}

	if ((rpipe->pipe_buffer.size - rpipe->pipe_buffer.cnt) >= PIPE_BUF)
		pipeselwakeup(rpipe);

	return (error);
}
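/*
 * The read path above drains at most one contiguous segment of the
 * ring per iteration and relies on the outer loop for wraparound.
 * A compact sketch of that segment computation over a hypothetical
 * ring structure (illustration only):
 */
#include <stddef.h>
#include <string.h>

struct ring {
	unsigned char	*buf;
	size_t		 size;	/* capacity */
	size_t		 in;	/* write index */
	size_t		 out;	/* read index */
	size_t		 cnt;	/* bytes stored */
};

size_t
ring_read_segment(struct ring *r, unsigned char *dst, size_t want)
{
	size_t n = r->size - r->out;	/* contiguous bytes until wrap */

	if (n > r->cnt)
		n = r->cnt;
	if (n > want)
		n = want;
	memcpy(dst, r->buf + r->out, n);
	r->out = (r->out + n) % r->size;
	r->cnt -= n;
	if (r->cnt == 0)		/* empty: rewind for cache locality */
		r->in = r->out = 0;
	return (n);
}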
/*
 * Vnode op for writing (FFS filesystem).
 */
int
ffs_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp;
	struct uio *uio;
	struct inode *ip;
	struct fs *fs;
	struct buf *bp;
	daddr_t lbn;
	off_t osize;
	int blkoffset, error, extended, flags, ioflag, size, xfersize;
	ssize_t resid, overrun;

	extended = 0;
	ioflag = ap->a_ioflag;
	uio = ap->a_uio;
	vp = ap->a_vp;
	ip = VTOI(vp);

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("ffs_write: mode");
#endif

	/*
	 * If writing 0 bytes, succeed and do not change
	 * update time or file offset (standards compliance)
	 */
	if (uio->uio_resid == 0)
		return (0);

	switch (vp->v_type) {
	case VREG:
		if (ioflag & IO_APPEND)
			uio->uio_offset = DIP(ip, size);
		if ((DIP(ip, flags) & APPEND) &&
		    uio->uio_offset != DIP(ip, size))
			return (EPERM);
		/* FALLTHROUGH */
	case VLNK:
		break;
	case VDIR:
		if ((ioflag & IO_SYNC) == 0)
			panic("ffs_write: nonsync dir write");
		break;
	default:
		panic("ffs_write: type");
	}

	fs = ip->i_fs;
	if (uio->uio_offset < 0 ||
	    (u_int64_t)uio->uio_offset + uio->uio_resid > fs->fs_maxfilesize)
		return (EFBIG);

	/* do the filesize rlimit check */
	if ((error = vn_fsizechk(vp, uio, ioflag, &overrun)))
		return (error);

	resid = uio->uio_resid;
	osize = DIP(ip, size);
	flags = ioflag & IO_SYNC ? B_SYNC : 0;

	for (error = 0; uio->uio_resid > 0;) {
		lbn = lblkno(fs, uio->uio_offset);
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (fs->fs_bsize > xfersize)
			flags |= B_CLRBUF;
		else
			flags &= ~B_CLRBUF;

		if ((error = UFS_BUF_ALLOC(ip, uio->uio_offset, xfersize,
		    ap->a_cred, flags, &bp)) != 0)
			break;
		if (uio->uio_offset + xfersize > DIP(ip, size)) {
			DIP_ASSIGN(ip, size, uio->uio_offset + xfersize);
			uvm_vnp_setsize(vp, DIP(ip, size));
			extended = 1;
		}
		(void)uvm_vnp_uncache(vp);

		size = blksize(fs, ip, lbn) - bp->b_resid;
		if (size < xfersize)
			xfersize = size;

		error = uiomovei(bp->b_data + blkoffset, xfersize, uio);
		/* On a copyin fault, zero the partially filled range. */
		if (error != 0)
			memset(bp->b_data + blkoffset, 0, xfersize);

#if 0
		if (ioflag & IO_NOCACHE)
			bp->b_flags |= B_NOCACHE;
#endif
		if (ioflag & IO_SYNC)
			(void)bwrite(bp);
		else if (xfersize + blkoffset == fs->fs_bsize) {
			if (doclusterwrite)
				cluster_write(bp, &ip->i_ci, DIP(ip, size));
			else
				bawrite(bp);
		} else
			bdwrite(bp);

		if (error || xfersize == 0)
			break;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
	}
	/*
	 * If we successfully wrote any data, and we are not the superuser
	 * we clear the setuid and setgid bits as a precaution against
	 * tampering.
	 */
	if (resid > uio->uio_resid && ap->a_cred && ap->a_cred->cr_uid != 0)
		DIP_ASSIGN(ip, mode, DIP(ip, mode) & ~(ISUID | ISGID));
	if (resid > uio->uio_resid)
		VN_KNOTE(vp, NOTE_WRITE | (extended ? NOTE_EXTEND : 0));
	if (error) {
		if (ioflag & IO_UNIT) {
			(void)UFS_TRUNCATE(ip, osize,
			    ioflag & IO_SYNC, ap->a_cred);
			uio->uio_offset -= resid - uio->uio_resid;
			uio->uio_resid = resid;
		}
	} else if (resid > uio->uio_resid && (ioflag & IO_SYNC)) {
		error = UFS_UPDATE(ip, 1);
	}

	/* correct the result for writes clamped by vn_fsizechk() */
	uio->uio_resid += overrun;

	return (error);
}
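/*
 * The write loop above carves the request into per-block transfers.
 * A standalone sketch of that arithmetic, with lblkno/blkoff mirrored
 * as plain division/modulo for an assumed fixed block size;
 * illustration only.
 */
#include <stdint.h>
#include <stdio.h>

#define BSIZE	16384	/* assumed filesystem block size */

void
show_transfers(uint64_t offset, uint64_t resid)
{
	while (resid > 0) {
		uint64_t lbn = offset / BSIZE;		/* logical block */
		uint64_t blkoffset = offset % BSIZE;	/* offset in block */
		uint64_t xfersize = BSIZE - blkoffset;	/* rest of block */

		if (resid < xfersize)
			xfersize = resid;
		printf("block %llu: copy %llu bytes at offset %llu\n",
		    (unsigned long long)lbn, (unsigned long long)xfersize,
		    (unsigned long long)blkoffset);
		offset += xfersize;
		resid -= xfersize;
	}
}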
/*
 * Vnode op for reading.
 */
int
ffs_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp;
	struct inode *ip;
	struct uio *uio;
	struct fs *fs;
	struct buf *bp;
	daddr_t lbn, nextlbn;
	off_t bytesinfile;
	long size, xfersize, blkoffset;
	mode_t mode;
	int error;

	vp = ap->a_vp;
	ip = VTOI(vp);
	mode = DIP(ip, mode);
	uio = ap->a_uio;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_READ)
		panic("ffs_read: mode");

	if (vp->v_type == VLNK) {
		if ((int)DIP(ip, size) < vp->v_mount->mnt_maxsymlinklen ||
		    (vp->v_mount->mnt_maxsymlinklen == 0 &&
		     DIP(ip, blocks) == 0))
			panic("ffs_read: short symlink");
	} else if (vp->v_type != VREG && vp->v_type != VDIR)
		panic("ffs_read: type %d", vp->v_type);
#endif
	fs = ip->i_fs;
	if ((u_int64_t)uio->uio_offset > fs->fs_maxfilesize)
		return (EFBIG);

	if (uio->uio_resid == 0)
		return (0);

	for (error = 0, bp = NULL; uio->uio_resid > 0; bp = NULL) {
		if ((bytesinfile = DIP(ip, size) - uio->uio_offset) <= 0)
			break;
		lbn = lblkno(fs, uio->uio_offset);
		nextlbn = lbn + 1;
		size = fs->fs_bsize;	/* WAS blksize(fs, ip, lbn); */
		blkoffset = blkoff(fs, uio->uio_offset);
		xfersize = fs->fs_bsize - blkoffset;
		if (uio->uio_resid < xfersize)
			xfersize = uio->uio_resid;
		if (bytesinfile < xfersize)
			xfersize = bytesinfile;

		if (lblktosize(fs, nextlbn) >= DIP(ip, size))
			error = bread(vp, lbn, size, &bp);
		else if (lbn - 1 == ip->i_ci.ci_lastr) {
			error = bread_cluster(vp, lbn, size, &bp);
		} else
			error = bread(vp, lbn, size, &bp);
		if (error)
			break;
		ip->i_ci.ci_lastr = lbn;

		/*
		 * We should only get non-zero b_resid when an I/O error
		 * has occurred, which should cause us to break above.
		 * However, if the short read did not cause an error,
		 * then we want to ensure that we do not uiomove bad
		 * or uninitialized data.
		 */
		size -= bp->b_resid;
		if (size < xfersize) {
			if (size == 0)
				break;
			xfersize = size;
		}
		error = uiomovei(bp->b_data + blkoffset, (int)xfersize, uio);
		if (error)
			break;
		brelse(bp);
	}
	if (bp != NULL)
		brelse(bp);
	if (!(vp->v_mount->mnt_flag & MNT_NOATIME) ||
	    (ip->i_flag & (IN_CHANGE | IN_UPDATE))) {
		ip->i_flag |= IN_ACCESS;
	}
	return (error);
}
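/*
 * ffs_read switches to clustered reads only when access looks
 * sequential (the previous request ended at lbn - 1, tracked in
 * ci_lastr). A minimal sketch of that detection, kept apart from any
 * buffer-cache details; the type and struct are assumptions.
 */
typedef long long lbn_t;

struct seq_state {
	lbn_t lastr;	/* last logical block handed out */
};

/* Returns nonzero when block lbn continues a sequential run. */
int
is_sequential(struct seq_state *s, lbn_t lbn)
{
	int seq = (s->lastr + 1 == lbn);

	s->lastr = lbn;
	return (seq);
}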
int
mbpp_rw(dev_t dev, struct uio *uio)
{
	int card = MAGMA_CARD(dev);
	int port = MAGMA_PORT(dev);
	struct mbpp_softc *ms = mbpp_cd.cd_devs[card];
	struct mbpp_port *mp = &ms->ms_port[port];
	caddr_t buffer, ptr;
	int buflen, cnt, len;
	int s, error = 0;
	int gotdata = 0;

	if (uio->uio_resid == 0)
		return (0);

	buflen = min(uio->uio_resid, mp->mp_burst);
	buffer = malloc(buflen, M_DEVBUF, M_WAITOK);

	SET(mp->mp_flags, MBPPF_UIO);

	/*
	 * start timeout, if needed
	 */
	if (mp->mp_timeout > 0) {
		SET(mp->mp_flags, MBPPF_TIMEOUT);
		timeout_add(&mp->mp_timeout_tmo, mp->mp_timeout);
	}

	len = cnt = 0;
	while (uio->uio_resid > 0) {
		len = min(buflen, uio->uio_resid);
		ptr = buffer;

		if (uio->uio_rw == UIO_WRITE) {
			error = uiomovei(ptr, len, uio);
			if (error)
				break;
		}
again:		/* goto bad */
		/* timed out? */
		if (!ISSET(mp->mp_flags, MBPPF_UIO))
			break;

		/*
		 * perform the operation
		 */
		if (uio->uio_rw == UIO_WRITE) {
			cnt = mbpp_send(mp, ptr, len);
		} else {
			cnt = mbpp_recv(mp, ptr, len);
		}

		if (uio->uio_rw == UIO_READ) {
			if (cnt) {
				error = uiomovei(ptr, cnt, uio);
				if (error)
					break;
				gotdata++;
			} else if (gotdata)	/* consider us done */
				break;
		}

		/* timed out? */
		if (!ISSET(mp->mp_flags, MBPPF_UIO))
			break;

		/*
		 * poll delay?
		 */
		if (mp->mp_delay > 0) {
			s = spltty();	/* XXX */
			SET(mp->mp_flags, MBPPF_DELAY);
			timeout_add(&mp->mp_start_tmo, mp->mp_delay);
			error = tsleep(mp, PCATCH | PZERO, "mbppdelay", 0);
			splx(s);
			if (error)
				break;
		}

		/*
		 * don't call uiomove again until we used all the data we grabbed
		 */
		if (uio->uio_rw == UIO_WRITE && cnt != len) {
			ptr += cnt;
			len -= cnt;
			cnt = 0;
			goto again;
		}
	}

	/*
	 * clear timeouts
	 */
	s = spltty();	/* XXX */
	if (ISSET(mp->mp_flags, MBPPF_TIMEOUT)) {
		timeout_del(&mp->mp_timeout_tmo);
		CLR(mp->mp_flags, MBPPF_TIMEOUT);
	}
	if (ISSET(mp->mp_flags, MBPPF_DELAY)) {
		timeout_del(&mp->mp_start_tmo);
		CLR(mp->mp_flags, MBPPF_DELAY);
	}
	splx(s);

	/*
	 * adjust for those chars that we uiomoved but never actually wrote
	 */
	if (uio->uio_rw == UIO_WRITE && cnt != len) {
		uio->uio_resid += (len - cnt);
	}

	free(buffer, M_DEVBUF, 0);
	return (error);
}
/*
 * Vnode op for write
 */
int
spec_write(void *v)
{
	struct vop_write_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct uio *uio = ap->a_uio;
	struct proc *p = uio->uio_procp;
	struct buf *bp;
	daddr_t bn, bscale;
	int bsize;
	struct partinfo dpart;
	int n, on, majordev;
	int (*ioctl)(dev_t, u_long, caddr_t, int, struct proc *);
	int error = 0;

#ifdef DIAGNOSTIC
	if (uio->uio_rw != UIO_WRITE)
		panic("spec_write mode");
	if (uio->uio_segflg == UIO_USERSPACE && uio->uio_procp != curproc)
		panic("spec_write proc");
#endif

	switch (vp->v_type) {

	case VCHR:
		VOP_UNLOCK(vp, 0, p);
		error = (*cdevsw[major(vp->v_rdev)].d_write)
			(vp->v_rdev, uio, ap->a_ioflag);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, p);
		return (error);

	case VBLK:
		if (uio->uio_resid == 0)
			return (0);
		if (uio->uio_offset < 0)
			return (EINVAL);
		bsize = BLKDEV_IOSIZE;
		if ((majordev = major(vp->v_rdev)) < nblkdev &&
		    (ioctl = bdevsw[majordev].d_ioctl) != NULL &&
		    (*ioctl)(vp->v_rdev, DIOCGPART, (caddr_t)&dpart,
		    FREAD, p) == 0) {
			u_int32_t frag =
			    DISKLABELV1_FFS_FRAG(dpart.part->p_fragblock);
			u_int32_t fsize =
			    DISKLABELV1_FFS_FSIZE(dpart.part->p_fragblock);
			if (dpart.part->p_fstype == FS_BSDFFS &&
			    frag != 0 && fsize != 0)
				bsize = frag * fsize;
		}
		bscale = btodb(bsize);
		do {
			bn = btodb(uio->uio_offset) & ~(bscale - 1);
			on = uio->uio_offset % bsize;
			n = min((bsize - on), uio->uio_resid);
			error = bread(vp, bn, bsize, &bp);
			n = min(n, bsize - bp->b_resid);
			if (error) {
				brelse(bp);
				return (error);
			}
			error = uiomovei((char *)bp->b_data + on, n, uio);
			if (n + on == bsize)
				bawrite(bp);
			else
				bdwrite(bp);
		} while (error == 0 && uio->uio_resid > 0 && n != 0);
		return (error);

	default:
		panic("spec_write type");
	}
	/* NOTREACHED */
}
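/*
 * The VBLK path above is a read-modify-write: it reads the enclosing
 * block, patches the affected bytes, and writes the block back. The
 * same idea from userland with pread/pwrite (a sketch; the block size
 * and single-block restriction are assumptions):
 */
#include <sys/types.h>
#include <string.h>
#include <unistd.h>

#define BLKSZ	512

/* Overwrite len bytes at byte offset off on a block device. */
int
rmw_write(int fd, off_t off, const void *src, size_t len)
{
	unsigned char blk[BLKSZ];
	off_t bn = off / BLKSZ * BLKSZ;	/* block-aligned base */
	size_t on = off % BLKSZ;	/* offset within block */

	if (on + len > BLKSZ)
		return (-1);	/* sketch: single-block writes only */
	if (pread(fd, blk, BLKSZ, bn) != BLKSZ)
		return (-1);
	memcpy(blk + on, src, len);	/* patch the affected bytes */
	if (pwrite(fd, blk, BLKSZ, bn) != BLKSZ)
		return (-1);
	return (0);
}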
/*
 * Vnode op for reading.
 */
int
cd9660_read(void *v)
{
	struct vop_read_args *ap = v;
	struct vnode *vp = ap->a_vp;
	register struct uio *uio = ap->a_uio;
	register struct iso_node *ip = VTOI(vp);
	register struct iso_mnt *imp;
	struct buf *bp;
	daddr_t lbn, rablock;
	off_t diff;
	int error = 0;
	long size, n, on;

	if (uio->uio_resid == 0)
		return (0);
	if (uio->uio_offset < 0)
		return (EINVAL);
	ip->i_flag |= IN_ACCESS;
	imp = ip->i_mnt;
	do {
		struct cluster_info *ci = &ip->i_ci;

		lbn = lblkno(imp, uio->uio_offset);
		on = blkoff(imp, uio->uio_offset);
		n = min((u_int)(imp->logical_block_size - on),
		    uio->uio_resid);
		diff = (off_t)ip->i_size - uio->uio_offset;
		if (diff <= 0)
			return (0);
		if (diff < n)
			n = diff;
		size = blksize(imp, ip, lbn);
		rablock = lbn + 1;
#define MAX_RA 32
		if (ci->ci_lastr + 1 == lbn) {
			struct ra {
				daddr_t blks[MAX_RA];
				int sizes[MAX_RA];
			} *ra;
			int i;

			ra = malloc(sizeof *ra, M_TEMP, M_WAITOK);
			for (i = 0; i < MAX_RA &&
			    lblktosize(imp, (rablock + i)) < ip->i_size;
			    i++) {
				ra->blks[i] = rablock + i;
				ra->sizes[i] = blksize(imp, ip, rablock + i);
			}
			error = breadn(vp, lbn, size, ra->blks,
			    ra->sizes, i, &bp);
			free(ra, M_TEMP, 0);
		} else
			error = bread(vp, lbn, size, &bp);
		ci->ci_lastr = lbn;
		n = min(n, size - bp->b_resid);
		if (error) {
			brelse(bp);
			return (error);
		}

		error = uiomovei(bp->b_data + on, (int)n, uio);
		brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n != 0);
	return (error);
}
int
bppwrite(dev_t dev, struct uio *uio, int flags)
{
	struct bpp_softc *sc = bpp_cd.cd_devs[BPPUNIT(dev)];
	struct lsi64854_softc *lsi = &sc->sc_lsi64854;
	int error = 0;
	int s;

	/*
	 * Wait until the DMA engine is free.
	 */
	s = splbpp();
	while ((sc->sc_flags & BPP_LOCKED) != 0) {
		if ((flags & IO_NDELAY) != 0) {
			splx(s);
			return (EWOULDBLOCK);
		}

		sc->sc_flags |= BPP_WANT;
		error = tsleep(sc->sc_buf, PZERO | PCATCH, "bppwrite", 0);
		if (error != 0) {
			splx(s);
			return (error);
		}
	}
	sc->sc_flags |= BPP_LOCKED;
	splx(s);

	/*
	 * Move data from user space into our private buffer
	 * and start DMA.
	 */
	while (uio->uio_resid > 0) {
		caddr_t bp = sc->sc_buf;
		size_t len = min(sc->sc_bufsz, uio->uio_resid);

		if ((error = uiomovei(bp, len, uio)) != 0)
			break;

		while (len > 0) {
			u_int8_t tcr;
			size_t size = len;
			DMA_SETUP(lsi, &bp, &len, 0, &size);

#ifdef DEBUG
			if (bppdebug) {
				int i;

				printf("bpp: writing %ld : ", len);
				for (i = 0; i < len; i++)
					printf("%c(0x%x)", bp[i], bp[i]);
				printf("\n");
			}
#endif

			/* Clear direction control bit */
			tcr = bus_space_read_1(lsi->sc_bustag, lsi->sc_regs,
			    L64854_REG_TCR);
			tcr &= ~BPP_TCR_DIR;
			bus_space_write_1(lsi->sc_bustag, lsi->sc_regs,
			    L64854_REG_TCR, tcr);

			/* Enable DMA */
			s = splbpp();
			DMA_GO(lsi);
			error = tsleep(sc, PZERO | PCATCH, "bppdma", 0);
			splx(s);
			if (error != 0)
				goto out;

			/* Bail out if bottom half reported an error */
			if ((error = sc->sc_error) != 0)
				goto out;

			/*
			 * DMA_INTR() does this part.
			 *
			 * len -= size;
			 */
		}
	}

out:
	DPRINTF(("bpp done %x\n", error));

	s = splbpp();
	sc->sc_flags &= ~BPP_LOCKED;
	if ((sc->sc_flags & BPP_WANT) != 0) {
		sc->sc_flags &= ~BPP_WANT;
		wakeup(sc->sc_buf);
	}
	splx(s);

	return (error);
}
int
ugen_do_ioctl(struct ugen_softc *sc, int endpt, u_long cmd, caddr_t addr,
    int flag, struct proc *p)
{
	struct ugen_endpoint *sce;
	int err;
	struct usbd_interface *iface;
	struct usb_config_desc *cd;
	usb_config_descriptor_t *cdesc;
	struct usb_interface_desc *id;
	usb_interface_descriptor_t *idesc;
	struct usb_endpoint_desc *ed;
	usb_endpoint_descriptor_t *edesc;
	struct usb_alt_interface *ai;
	struct usb_string_desc *si;
	u_int8_t conf, alt;

	DPRINTFN(5, ("ugenioctl: cmd=%08lx\n", cmd));
	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	switch (cmd) {
	case FIONBIO:
		/* All handled in the upper FS layer. */
		return (0);
	case USB_SET_SHORT_XFER:
		if (endpt == USB_CONTROL_ENDPOINT)
			return (EINVAL);
		/* This flag only affects read */
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL || sce->pipeh == NULL)
			return (EINVAL);
		if (*(int *)addr)
			sce->state |= UGEN_SHORT_OK;
		else
			sce->state &= ~UGEN_SHORT_OK;
		return (0);
	case USB_SET_TIMEOUT:
		sce = &sc->sc_endpoints[endpt][IN];
		if (sce == NULL)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		sce = &sc->sc_endpoints[endpt][OUT];
		if (sce == NULL)
			return (EINVAL);
		sce->timeout = *(int *)addr;
		return (0);
	default:
		break;
	}

	if (endpt != USB_CONTROL_ENDPOINT)
		return (EINVAL);

	switch (cmd) {
#ifdef UGEN_DEBUG
	case USB_SETDEBUG:
		ugendebug = *(int *)addr;
		break;
#endif
	case USB_GET_CONFIG:
		err = usbd_get_config(sc->sc_udev, &conf);
		if (err)
			return (EIO);
		*(int *)addr = conf;
		break;
	case USB_SET_CONFIG:
		if (!(flag & FWRITE))
			return (EPERM);
		err = ugen_set_config(sc, *(int *)addr);
		switch (err) {
		case USBD_NORMAL_COMPLETION:
			break;
		case USBD_IN_USE:
			return (EBUSY);
		default:
			return (EIO);
		}
		break;
	case USB_GET_ALTINTERFACE:
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		idesc = usbd_get_interface_descriptor(iface);
		if (idesc == NULL)
			return (EIO);
		ai->uai_alt_no = idesc->bAlternateSetting;
		break;
	case USB_SET_ALTINTERFACE:
		if (!(flag & FWRITE))
			return (EPERM);
		ai = (struct usb_alt_interface *)addr;
		err = usbd_device2interface_handle(sc->sc_udev,
		    ai->uai_interface_index, &iface);
		if (err)
			return (EINVAL);
		err = ugen_set_interface(sc, ai->uai_interface_index,
		    ai->uai_alt_no);
		if (err)
			return (EINVAL);
		break;
	case USB_GET_NO_ALT:
		ai = (struct usb_alt_interface *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, ai->uai_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		idesc = usbd_find_idesc(cdesc, ai->uai_interface_index, 0);
		if (idesc == NULL) {
			free(cdesc, M_TEMP, 0);
			return (EINVAL);
		}
		ai->uai_alt_no = usbd_get_no_alts(cdesc,
		    idesc->bInterfaceNumber);
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_DEVICE_DESC:
		*(usb_device_descriptor_t *)addr =
		    *usbd_get_device_descriptor(sc->sc_udev);
		break;
	case USB_GET_CONFIG_DESC:
		cd = (struct usb_config_desc *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, cd->ucd_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		cd->ucd_desc = *cdesc;
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_INTERFACE_DESC:
		id = (struct usb_interface_desc *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, id->uid_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (id->uid_config_index == USB_CURRENT_CONFIG_INDEX &&
		    id->uid_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, id->uid_interface_index);
		else
			alt = id->uid_alt_index;
		idesc = usbd_find_idesc(cdesc, id->uid_interface_index, alt);
		if (idesc == NULL) {
			free(cdesc, M_TEMP, 0);
			return (EINVAL);
		}
		id->uid_desc = *idesc;
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_ENDPOINT_DESC:
		ed = (struct usb_endpoint_desc *)addr;
		cdesc = usbd_get_cdesc(sc->sc_udev, ed->ued_config_index, 0);
		if (cdesc == NULL)
			return (EINVAL);
		if (ed->ued_config_index == USB_CURRENT_CONFIG_INDEX &&
		    ed->ued_alt_index == USB_CURRENT_ALT_INDEX)
			alt = ugen_get_alt_index(sc, ed->ued_interface_index);
		else
			alt = ed->ued_alt_index;
		edesc = usbd_find_edesc(cdesc, ed->ued_interface_index,
		    alt, ed->ued_endpoint_index);
		if (edesc == NULL) {
			free(cdesc, M_TEMP, 0);
			return (EINVAL);
		}
		ed->ued_desc = *edesc;
		free(cdesc, M_TEMP, 0);
		break;
	case USB_GET_FULL_DESC:
	{
		int len;
		struct iovec iov;
		struct uio uio;
		struct usb_full_desc *fd = (struct usb_full_desc *)addr;
		int error;

		cdesc = usbd_get_cdesc(sc->sc_udev, fd->ufd_config_index,
		    &len);
		if (cdesc == NULL)
			return (EINVAL);
		if (len > fd->ufd_size)
			len = fd->ufd_size;
		iov.iov_base = (caddr_t)fd->ufd_data;
		iov.iov_len = len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_resid = len;
		uio.uio_offset = 0;
		uio.uio_segflg = UIO_USERSPACE;
		uio.uio_rw = UIO_READ;
		uio.uio_procp = p;
		error = uiomovei((void *)cdesc, len, &uio);
		free(cdesc, M_TEMP, 0);
		return (error);
	}
	case USB_GET_STRING_DESC:
	{
		int len;

		si = (struct usb_string_desc *)addr;
		err = usbd_get_string_desc(sc->sc_udev,
		    si->usd_string_index,
		    si->usd_language_id, &si->usd_desc, &len);
		if (err)
			return (EINVAL);
		break;
	}
	case USB_DO_REQUEST:
	{
		struct usb_ctl_request *ur = (void *)addr;
		int len = UGETW(ur->ucr_request.wLength);
		struct iovec iov;
		struct uio uio;
		void *ptr = 0;
		int error = 0;

		if (!(flag & FWRITE))
			return (EPERM);
		/* Avoid requests that would damage the bus integrity. */
		if ((ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_ADDRESS) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_DEVICE &&
		     ur->ucr_request.bRequest == UR_SET_CONFIG) ||
		    (ur->ucr_request.bmRequestType == UT_WRITE_INTERFACE &&
		     ur->ucr_request.bRequest == UR_SET_INTERFACE))
			return (EINVAL);

		if (len < 0 || len > 32767)
			return (EINVAL);
		if (len != 0) {
			iov.iov_base = (caddr_t)ur->ucr_data;
			iov.iov_len = len;
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_resid = len;
			uio.uio_offset = 0;
			uio.uio_segflg = UIO_USERSPACE;
			uio.uio_rw =
			    ur->ucr_request.bmRequestType & UT_READ ?
			    UIO_READ : UIO_WRITE;
			uio.uio_procp = p;
			ptr = malloc(len, M_TEMP, M_WAITOK);
			if (uio.uio_rw == UIO_WRITE) {
				error = uiomovei(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
		sce = &sc->sc_endpoints[endpt][IN];
		err = usbd_do_request_flags(sc->sc_udev, &ur->ucr_request,
		    ptr, ur->ucr_flags, &ur->ucr_actlen, sce->timeout);
		if (err) {
			error = EIO;
			goto ret;
		}
		/* Only if USBD_SHORT_XFER_OK is set. */
		if (len > ur->ucr_actlen)
			len = ur->ucr_actlen;
		if (len != 0) {
			if (uio.uio_rw == UIO_READ) {
				error = uiomovei(ptr, len, &uio);
				if (error)
					goto ret;
			}
		}
	ret:
		if (ptr)
			free(ptr, M_TEMP, 0);
		return (error);
	}
	case USB_GET_DEVICEINFO:
		usbd_fill_deviceinfo(sc->sc_udev,
		    (struct usb_device_info *)addr, 1);
		break;
	default:
		return (EINVAL);
	}
	return (0);
}
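/*
 * USB_GET_FULL_DESC and USB_DO_REQUEST above both build a struct uio
 * by hand to move bytes between a kernel buffer and a user pointer.
 * The recurring pattern, factored into a sketch helper; it mirrors
 * only the fields the code above already uses and is not an existing
 * kernel API.
 */
static int
copyout_via_uio(void *kbuf, size_t len, void *uaddr, struct proc *p)
{
	struct iovec iov;
	struct uio uio;

	iov.iov_base = (caddr_t)uaddr;	/* user destination */
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = len;		/* bytes left to move */
	uio.uio_offset = 0;
	uio.uio_segflg = UIO_USERSPACE;	/* iov points at user memory */
	uio.uio_rw = UIO_READ;		/* "read" = kernel -> user */
	uio.uio_procp = p;
	return (uiomovei(kbuf, len, &uio));
}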
int
ugen_do_write(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][OUT];
	u_int32_t n;
	int flags, error = 0;
	char buf[UGEN_BBSIZE];
	struct usbd_xfer *xfer;
	usbd_status err;

	DPRINTFN(5, ("%s: ugenwrite: %d\n", sc->sc_dev.dv_xname, endpt));

	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenwrite: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenwrite: no pipe\n");
		return (EIO);
	}
#endif
	flags = USBD_SYNCHRONOUS;
	if (sce->timeout == 0)
		flags |= USBD_CATCH;

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_BULK:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			error = uiomovei(buf, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	case UE_INTERRUPT:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (EIO);
		while ((n = min(UGETW(sce->edesc->wMaxPacketSize),
		    uio->uio_resid)) != 0) {
			error = uiomovei(buf, n, uio);
			if (error)
				break;
			DPRINTFN(1, ("ugenwrite: transfer %d bytes\n", n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
		}
		usbd_free_xfer(xfer);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
int
ugen_do_read(struct ugen_softc *sc, int endpt, struct uio *uio, int flag)
{
	struct ugen_endpoint *sce = &sc->sc_endpoints[endpt][IN];
	u_int32_t n, tn;
	char buf[UGEN_BBSIZE];
	struct usbd_xfer *xfer;
	usbd_status err;
	int s;
	int flags, error = 0;
	u_char buffer[UGEN_CHUNK];

	DPRINTFN(5, ("%s: ugenread: %d\n", sc->sc_dev.dv_xname, endpt));

	if (usbd_is_dying(sc->sc_udev))
		return (EIO);

	if (endpt == USB_CONTROL_ENDPOINT)
		return (ENODEV);

#ifdef DIAGNOSTIC
	if (sce->edesc == NULL) {
		printf("ugenread: no edesc\n");
		return (EIO);
	}
	if (sce->pipeh == NULL) {
		printf("ugenread: no pipe\n");
		return (EIO);
	}
#endif

	switch (sce->edesc->bmAttributes & UE_XFERTYPE) {
	case UE_INTERRUPT:
		/* Block until activity occurred. */
		s = splusb();
		while (sce->q.c_cc == 0) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    (sce->timeout * hz) / 1000);
			sce->state &= ~UGEN_ASLP;
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (usbd_is_dying(sc->sc_udev))
				error = EIO;
			if (error == EWOULDBLOCK) {	/* timeout, return 0 */
				error = 0;
				break;
			}
			if (error)
				break;
		}
		splx(s);

		/* Transfer as many chunks as possible. */
		while (sce->q.c_cc > 0 && uio->uio_resid > 0 && !error) {
			n = min(sce->q.c_cc, uio->uio_resid);
			if (n > sizeof(buffer))
				n = sizeof(buffer);

			/* Remove a small chunk from the input queue. */
			q_to_b(&sce->q, buffer, n);
			DPRINTFN(5, ("ugenread: got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomovei(buffer, n, uio);
			if (error)
				break;
		}
		break;
	case UE_BULK:
		xfer = usbd_alloc_xfer(sc->sc_udev);
		if (xfer == 0)
			return (ENOMEM);
		flags = USBD_SYNCHRONOUS;
		if (sce->state & UGEN_SHORT_OK)
			flags |= USBD_SHORT_XFER_OK;
		if (sce->timeout == 0)
			flags |= USBD_CATCH;
		while ((n = min(UGEN_BBSIZE, uio->uio_resid)) != 0) {
			DPRINTFN(1, ("ugenread: start transfer %d bytes\n",
			    n));
			usbd_setup_xfer(xfer, sce->pipeh, 0, buf, n,
			    flags, sce->timeout, NULL);
			err = usbd_transfer(xfer);
			if (err) {
				usbd_clear_endpoint_stall(sce->pipeh);
				if (err == USBD_INTERRUPTED)
					error = EINTR;
				else if (err == USBD_TIMEOUT)
					error = ETIMEDOUT;
				else
					error = EIO;
				break;
			}
			usbd_get_xfer_status(xfer, NULL, NULL, &tn, NULL);
			DPRINTFN(1, ("ugenread: got %d bytes\n", tn));
			error = uiomovei(buf, tn, uio);
			if (error || tn < n)
				break;
		}
		usbd_free_xfer(xfer);
		break;
	case UE_ISOCHRONOUS:
		s = splusb();
		while (sce->cur == sce->fill) {
			if (flag & IO_NDELAY) {
				splx(s);
				return (EWOULDBLOCK);
			}
			sce->state |= UGEN_ASLP;
			DPRINTFN(5, ("ugenread: sleep on %p\n", sce));
			error = tsleep(sce, PZERO | PCATCH, "ugenri",
			    (sce->timeout * hz) / 1000);
			sce->state &= ~UGEN_ASLP;
			DPRINTFN(5, ("ugenread: woke, error=%d\n", error));
			if (usbd_is_dying(sc->sc_udev))
				error = EIO;
			if (error == EWOULDBLOCK) {	/* timeout, return 0 */
				error = 0;
				break;
			}
			if (error)
				break;
		}

		while (sce->cur != sce->fill && uio->uio_resid > 0 &&
		    !error) {
			if (sce->fill > sce->cur)
				n = min(sce->fill - sce->cur,
				    uio->uio_resid);
			else
				n = min(sce->limit - sce->cur,
				    uio->uio_resid);

			DPRINTFN(5, ("ugenread: isoc got %d chars\n", n));

			/* Copy the data to the user process. */
			error = uiomovei(sce->cur, n, uio);
			if (error)
				break;
			sce->cur += n;
			if (sce->cur >= sce->limit)
				sce->cur = sce->ibuf;
		}
		splx(s);
		break;
	default:
		return (ENXIO);
	}
	return (error);
}
int
ptcwrite(dev_t dev, struct uio *uio, int flag)
{
	struct pt_softc *pti = pt_softc[minor(dev)];
	struct tty *tp = pti->pt_tty;
	u_char *cp = NULL;
	int cc = 0, bufcc = 0;
	u_char buf[BUFSIZ];
	size_t cnt = 0;
	int error = 0;

again:
	if ((tp->t_state&TS_ISOPEN) == 0)
		goto block;
	if (pti->pt_flags & PF_REMOTE) {
		if (tp->t_canq.c_cc)
			goto block;
		while (uio->uio_resid > 0 &&
		    tp->t_canq.c_cc < TTYHOG(tp) - 1) {
			if (cc == 0) {
				cc = MIN(uio->uio_resid, BUFSIZ);
				cc = min(cc, TTYHOG(tp) - 1 - tp->t_canq.c_cc);
				if (cc > bufcc)
					bufcc = cc;
				cp = buf;
				error = uiomovei(cp, cc, uio);
				if (error)
					goto done;
				/* check again for safety */
				if ((tp->t_state&TS_ISOPEN) == 0) {
					error = EIO;
					goto done;
				}
			}
			if (cc)
				(void) b_to_q((char *)cp, cc, &tp->t_canq);
			cc = 0;
		}
		(void) putc(0, &tp->t_canq);
		ttwakeup(tp);
		wakeup(&tp->t_canq);
		goto done;
	}
	do {
		if (cc == 0) {
			cc = MIN(uio->uio_resid, BUFSIZ);
			/*
			 * Track the high-water mark only; assigning
			 * bufcc = cc here could shrink the watermark
			 * and leave stale bytes unzeroed at done.
			 */
			if (cc > bufcc)
				bufcc = cc;
			cp = buf;
			error = uiomovei(cp, cc, uio);
			if (error)
				goto done;
			/* check again for safety */
			if ((tp->t_state&TS_ISOPEN) == 0) {
				error = EIO;
				goto done;
			}
		}
		while (cc > 0) {
			if ((tp->t_rawq.c_cc + tp->t_canq.c_cc) >=
			    TTYHOG(tp) - 2 &&
			    (tp->t_canq.c_cc > 0 ||
			    !ISSET(tp->t_lflag, ICANON))) {
				wakeup(&tp->t_rawq);
				goto block;
			}
			(*linesw[tp->t_line].l_rint)(*cp++, tp);
			cnt++;
			cc--;
		}
		cc = 0;
	} while (uio->uio_resid > 0);
	goto done;

block:
	/*
	 * Come here to wait for slave to open, for space
	 * in outq, or space in rawq.
	 */
	if ((tp->t_state&TS_CARR_ON) == 0) {
		error = EIO;
		goto done;
	}
	if (flag & IO_NDELAY) {
		/* adjust for data copied in but not written */
		uio->uio_resid += cc;
		if (cnt == 0)
			error = EWOULDBLOCK;
		goto done;
	}
	error = tsleep(&tp->t_rawq.c_cf, TTOPRI | PCATCH, ttyout, 0);
	if (error == 0)
		goto again;

	/* adjust for data copied in but not written */
	uio->uio_resid += cc;
done:
	if (bufcc)
		explicit_bzero(buf, bufcc);
	return (error);
}
int
ptcread(dev_t dev, struct uio *uio, int flag)
{
	struct pt_softc *pti = pt_softc[minor(dev)];
	struct tty *tp = pti->pt_tty;
	char buf[BUFSIZ];
	int error = 0, cc, bufcc = 0;

	/*
	 * We want to block until the slave
	 * is open, and there's something to read;
	 * but if we lost the slave or we're NBIO,
	 * then return the appropriate error instead.
	 */
	for (;;) {
		if (tp->t_state&TS_ISOPEN) {
			if (pti->pt_flags&PF_PKT && pti->pt_send) {
				error = ureadc((int)pti->pt_send, uio);
				if (error)
					return (error);
				if (pti->pt_send & TIOCPKT_IOCTL) {
					cc = MIN(uio->uio_resid,
						sizeof(tp->t_termios));
					error = uiomovei(&tp->t_termios,
					    cc, uio);
					if (error)
						return (error);
				}
				pti->pt_send = 0;
				return (0);
			}
			if (pti->pt_flags&PF_UCNTL && pti->pt_ucntl) {
				error = ureadc((int)pti->pt_ucntl, uio);
				if (error)
					return (error);
				pti->pt_ucntl = 0;
				return (0);
			}
			if (tp->t_outq.c_cc && (tp->t_state&TS_TTSTOP) == 0)
				break;
		}
		if ((tp->t_state&TS_CARR_ON) == 0)
			return (0);	/* EOF */
		if (flag & IO_NDELAY)
			return (EWOULDBLOCK);
		error = tsleep(&tp->t_outq.c_cf, TTIPRI | PCATCH, ttyin, 0);
		if (error)
			return (error);
	}
	if (pti->pt_flags & (PF_PKT|PF_UCNTL))
		error = ureadc(0, uio);
	while (uio->uio_resid > 0 && error == 0) {
		cc = MIN(uio->uio_resid, BUFSIZ);
		cc = q_to_b(&tp->t_outq, buf, cc);
		if (cc > bufcc)
			bufcc = cc;
		if (cc <= 0)
			break;
		error = uiomovei(buf, cc, uio);
	}
	ttwakeupwr(tp);
	if (bufcc)
		explicit_bzero(buf, bufcc);
	return (error);
}
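/*
 * A userland sketch of the master/slave relationship these routines
 * serve: bytes written on the slave surface in tp->t_outq and are
 * returned by ptcread on the master side. Uses openpty(3) from
 * libutil (link with -lutil; the header is <pty.h> on some systems);
 * illustration only.
 */
#include <unistd.h>
#include <util.h>

int
pty_echo_demo(void)
{
	int master, slave;
	char out[64];
	ssize_t n;

	if (openpty(&master, &slave, NULL, NULL, NULL) == -1)
		return (-1);
	/* Slave output is drained by reading the master. */
	if (write(slave, "hello\n", 6) != 6)
		return (-1);
	n = read(master, out, sizeof(out));
	close(slave);
	close(master);
	return (n > 0 ? 0 : -1);
}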