/*
 * Simplified in-kernel wrapper calls for extended attribute access.
 * Both calls pass in a NULL credential, authorizing a "kernel" access.
 * Set IO_NODELOCKED in ioflg if the vnode is already locked.
 */
int
vn_extattr_get(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t *buflen, void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = *buflen;
	aiov.iov_base = bf;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_offset = 0;
	auio.uio_resid = *buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0)
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);

	error = VOP_GETEXTATTR(vp, attrnamespace, attrname, &auio, NULL, NULL);

	if ((ioflg & IO_NODELOCKED) == 0)
		VOP_UNLOCK(vp);

	if (error == 0)
		*buflen = *buflen - auio.uio_resid;

	return (error);
}
/*
 * XXX Failure mode if partially written?
 */
int
vn_extattr_set(struct vnode *vp, int ioflg, int attrnamespace,
    const char *attrname, size_t buflen, const void *bf, struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	aiov.iov_len = buflen;
	aiov.iov_base = __UNCONST(bf);		/* XXXUNCONST kills const */

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_WRITE;
	auio.uio_offset = 0;
	auio.uio_resid = buflen;
	UIO_SETUP_SYSSPACE(&auio);

	if ((ioflg & IO_NODELOCKED) == 0) {
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	}

	error = VOP_SETEXTATTR(vp, attrnamespace, attrname, &auio, NULL);

	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}

	return (error);
}
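/*
 * Usage sketch (not part of the original source): a kernel caller
 * reading and rewriting a small extended attribute through the two
 * wrappers above.  The attribute name "example.generation" and the
 * helper itself are hypothetical; the vnode is assumed unlocked, so
 * IO_NODELOCKED is not set and the wrappers take the lock themselves.
 */
static int
example_bump_generation(struct vnode *vp, struct lwp *l)
{
	uint32_t gen;
	size_t len = sizeof(gen);
	int error;

	error = vn_extattr_get(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "example.generation", &len, &gen, l);
	if (error)
		return error;
	if (len != sizeof(gen))
		return EINVAL;
	gen++;
	return vn_extattr_set(vp, 0, EXTATTR_NAMESPACE_SYSTEM,
	    "example.generation", sizeof(gen), &gen, l);
}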
/*
 * Update the disk quota in the quota file.
 */
int
lfs_dq1sync(struct vnode *vp, struct dquot *dq)
{
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	if (dq == NODQUOT)
		panic("dq1sync: dquot");
	KASSERT(mutex_owned(&dq->dq_interlock));
	if ((dq->dq_flags & DQ_MOD) == 0)
		return (0);
	if ((dqvp = dq->dq_ump->um_quotas[dq->dq_type]) == NULLVP)
		panic("dq1sync: file");
	KASSERT(dqvp != vp);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(dq->dq_id * sizeof (struct dqblk));
	auio.uio_rw = UIO_WRITE;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_WRITE(dqvp, &auio, 0, dq->dq_ump->um_cred[dq->dq_type]);
	if (auio.uio_resid && error == 0)
		error = EIO;
	dq->dq_flags &= ~DQ_MOD;
	VOP_UNLOCK(dqvp);
	return (error);
}
int
cd9660_getattr(void *v)
{
	struct vop_getattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct iso_node *ip = VTOI(vp);
	struct vattr *vap = ap->a_vap;

	vap->va_fsid = ip->i_dev;
	vap->va_fileid = ip->i_number;

	vap->va_mode = ip->inode.iso_mode & ALLPERMS;
	vap->va_nlink = ip->inode.iso_links;
	vap->va_uid = ip->inode.iso_uid;
	vap->va_gid = ip->inode.iso_gid;
	vap->va_atime = ip->inode.iso_atime;
	vap->va_mtime = ip->inode.iso_mtime;
	vap->va_ctime = ip->inode.iso_ctime;
	vap->va_rdev = ip->inode.iso_rdev;

	vap->va_size = (u_quad_t) ip->i_size;
	if (ip->i_size == 0 && vp->v_type == VLNK) {
		struct vop_readlink_args rdlnk;
		struct iovec aiov;
		struct uio auio;
		char *cp;

		cp = (char *)malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_resid = MAXPATHLEN;
		UIO_SETUP_SYSSPACE(&auio);
		rdlnk.a_uio = &auio;
		rdlnk.a_vp = ap->a_vp;
		rdlnk.a_cred = ap->a_cred;
		if (cd9660_readlink(&rdlnk) == 0)
			vap->va_size = MAXPATHLEN - auio.uio_resid;
		free(cp, M_TEMP);
	}
	vap->va_flags = 0;
	vap->va_gen = 1;
	vap->va_blocksize = ip->i_mnt->logical_block_size;
	vap->va_bytes = (u_quad_t) ip->i_size;
	vap->va_type = vp->v_type;
	return (0);
}
/*
 * Package up an I/O request on a vnode into a uio and do it.
 */
int
vn_rdwr(enum uio_rw rw, struct vnode *vp, void *base, int len, off_t offset,
    enum uio_seg segflg, int ioflg, kauth_cred_t cred, size_t *aresid,
    struct lwp *l)
{
	struct uio auio;
	struct iovec aiov;
	int error;

	if ((ioflg & IO_NODELOCKED) == 0) {
		if (rw == UIO_READ) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
		} else /* UIO_WRITE */ {
			vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
		}
	}
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = base;
	aiov.iov_len = len;
	auio.uio_resid = len;
	auio.uio_offset = offset;
	auio.uio_rw = rw;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}

	if ((error = enforce_rlimit_fsize(vp, &auio, ioflg)) != 0)
		goto out;

	if (rw == UIO_READ) {
		error = VOP_READ(vp, &auio, ioflg, cred);
	} else {
		error = VOP_WRITE(vp, &auio, ioflg, cred);
	}

	if (aresid)
		*aresid = auio.uio_resid;
	else if (auio.uio_resid && error == 0)
		error = EIO;

 out:
	if ((ioflg & IO_NODELOCKED) == 0) {
		VOP_UNLOCK(vp);
	}
	return (error);
}
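/*
 * Usage sketch (not part of the original source): vn_rdwr() hides all
 * of the uio/iovec setup seen elsewhere in this collection.  Reading a
 * 512 byte header from an unlocked, referenced vnode into a kernel
 * buffer takes one call; passing a NULL aresid converts a short
 * transfer into EIO, as the code above shows.  The helper is
 * hypothetical.
 */
static int
example_read_header(struct vnode *vp, kauth_cred_t cred, void *buf)
{

	return vn_rdwr(UIO_READ, vp, buf, 512, (off_t)0, UIO_SYSSPACE,
	    0, cred, NULL, curlwp);
}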
/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
int
lfs_dq1get(struct vnode *dqvp, u_long id, struct ulfsmount *ump, int type,
    struct dquot *dq)
{
	struct iovec aiov;
	struct uio auio;
	int error;

	KASSERT(mutex_owned(&dq->dq_interlock));
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_un.dq1_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_un.dq1_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp);
	/*
	 * I/O error in reading quota file; reflect the problem to the
	 * caller.
	 */
	if (error)
		return (error);
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->umq1_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->umq1_itime[type];
	}
	return (0);
}
int
ulfs_bufio(enum uio_rw rw, struct vnode *vp, void *buf, size_t len, off_t off,
    int ioflg, kauth_cred_t cred, size_t *aresid, struct lwp *l)
{
	struct iovec iov;
	struct uio uio;
	int error;

	KASSERT(ISSET(ioflg, IO_NODELOCKED));
	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(rw != UIO_WRITE || VOP_ISLOCKED(vp) == LK_EXCLUSIVE);

	iov.iov_base = buf;
	iov.iov_len = len;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_resid = len;
	uio.uio_offset = off;
	uio.uio_rw = rw;
	UIO_SETUP_SYSSPACE(&uio);

	switch (rw) {
	case UIO_READ:
		error = lfs_bufrd(vp, &uio, ioflg, cred);
		break;
	case UIO_WRITE:
		error = lfs_bufwr(vp, &uio, ioflg, cred);
		break;
	default:
		panic("invalid uio rw: %d", (int)rw);
	}

	if (aresid)
		*aresid = uio.uio_resid;
	else if (uio.uio_resid && error == 0)
		error = EIO;

	KASSERT(VOP_ISLOCKED(vp));
	KASSERT(rw != UIO_WRITE || VOP_ISLOCKED(vp) == LK_EXCLUSIVE);
	return error;
}
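/*
 * Usage sketch (not part of the original source): unlike vn_rdwr(),
 * ulfs_bufio() only asserts its locking contract, so the caller must
 * already hold the vnode lock (exclusively for writes) and must pass
 * IO_NODELOCKED.  The helper below is hypothetical.
 */
static int
example_bufread(struct vnode *vp, kauth_cred_t cred, void *buf, size_t len)
{
	int error;

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = ulfs_bufio(UIO_READ, vp, buf, len, (off_t)0, IO_NODELOCKED,
	    cred, NULL, curlwp);
	VOP_UNLOCK(vp);
	return error;
}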
/*
 * Duplicate the current process's credentials.  Since we are called only
 * as the result of a SET ioctl and only root can do that, any future access
 * to this "disk" is essentially as root.  Note that credentials may change
 * if some other uid can write directly to the mapped file (NFS).
 */
static int
vndsetcred(struct vnd_softc *vnd, kauth_cred_t cred)
{
	struct uio auio;
	struct iovec aiov;
	char *tmpbuf;
	int error;

	vnd->sc_cred = kauth_cred_dup(cred);
	tmpbuf = malloc(DEV_BSIZE, M_TEMP, M_WAITOK);

	/* XXX: Horrible kludge to establish credentials for NFS */
	aiov.iov_base = tmpbuf;
	aiov.iov_len = min(DEV_BSIZE, dbtob(vnd->sc_size));
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = 0;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = aiov.iov_len;
	UIO_SETUP_SYSSPACE(&auio);
	vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
	error = VOP_READ(vnd->sc_vp, &auio, 0, vnd->sc_cred);
	if (error == 0) {
		/*
		 * Because vnd does all IO directly through the vnode
		 * we need to flush (at least) the buffer from the above
		 * VOP_READ from the buffer cache to prevent cache
		 * incoherencies.  Also, be careful to write dirty
		 * buffers back to stable storage.
		 */
		error = vinvalbuf(vnd->sc_vp, V_SAVE, vnd->sc_cred,
		    curlwp, 0, 0);
	}
	VOP_UNLOCK(vnd->sc_vp);

	free(tmpbuf, M_TEMP);
	return error;
}
/*
 * Linux 'readdir' call.  This code is mostly taken from the
 * SunOS getdents call (see compat/sunos/sunos_misc.c), though
 * an attempt has been made to keep it a little cleaner.
 *
 * The d_off field contains the offset of the next valid entry,
 * unlike the older Linux getdents(2), which set it to the offset
 * of the entry itself.  This function also doesn't need to deal
 * with the old count == 1 glibc problem.
 *
 * Read in BSD-style entries, convert them, and copy them out.
 *
 * Note that this doesn't handle union-mounted filesystems.
 */
int
linux_sys_getdents64(struct lwp *l,
    const struct linux_sys_getdents64_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(struct linux_dirent64 *) dent;
		syscallarg(unsigned int) count;
	} */
	struct dirent *bdp;
	struct vnode *vp;
	char *inp, *tbuf;		/* BSD-format */
	int len, reclen;		/* BSD-format */
	char *outp;			/* Linux-format */
	int resid, linux_reclen = 0;	/* Linux-format */
	file_t *fp;
	struct uio auio;
	struct iovec aiov;
	struct linux_dirent64 idb;
	off_t off;			/* true file offset */
	int buflen, error, eofflag, nbytes;
	struct vattr va;
	off_t *cookiebuf = NULL, *cookie;
	int ncookies;

	/* fd_getvnode() will use the descriptor for us */
	if ((error = fd_getvnode(SCARG(uap, fd), &fp)) != 0)
		return (error);

	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out1;
	}

	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VDIR) {
		error = EINVAL;
		goto out1;
	}

	if ((error = VOP_GETATTR(vp, &va, l->l_cred)))
		goto out1;

	nbytes = SCARG(uap, count);
	buflen = min(MAXBSIZE, nbytes);
	if (buflen < va.va_blocksize)
		buflen = va.va_blocksize;
	tbuf = malloc(buflen, M_TEMP, M_WAITOK);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	off = fp->f_offset;
again:
	aiov.iov_base = tbuf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = buflen;
	auio.uio_offset = off;
	UIO_SETUP_SYSSPACE(&auio);
	/*
	 * First we read into the malloc'ed buffer, then
	 * we massage it into user space, one record at a time.
	 */
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &cookiebuf,
	    &ncookies);
	if (error)
		goto out;

	inp = tbuf;
	outp = (void *)SCARG(uap, dent);
	resid = nbytes;
	if ((len = buflen - auio.uio_resid) == 0)
		goto eof;

	for (cookie = cookiebuf; len > 0; len -= reclen) {
		bdp = (struct dirent *)inp;
		reclen = bdp->d_reclen;
		if (reclen & 3)
			panic("linux_readdir");
		if (bdp->d_fileno == 0) {
			inp += reclen;	/* it is a hole; squish it out */
			if (cookie)
				off = *cookie++;
			else
				off += reclen;
			continue;
		}
		linux_reclen = LINUX_RECLEN(&idb, bdp->d_namlen);
		if (reclen > len || resid < linux_reclen) {
			/* entry too big for buffer, so just stop */
			outp++;
			break;
		}
		if (cookie)
			off = *cookie++;	/* each entry points to next */
		else
			off += reclen;
		/*
		 * Massage in place to make a Linux-shaped dirent (otherwise
		 * we have to worry about touching user memory outside of
		 * the copyout() call).
		 */
		idb.d_ino = bdp->d_fileno;
		idb.d_type = bdp->d_type;
		idb.d_off = off;
		idb.d_reclen = (u_short)linux_reclen;
		strcpy(idb.d_name, bdp->d_name);
		if ((error = copyout((void *)&idb, outp, linux_reclen)))
			goto out;
		/* advance past this real entry */
		inp += reclen;
		/* advance output past Linux-shaped entry */
		outp += linux_reclen;
		resid -= linux_reclen;
	}

	/* if we squished out the whole block, try again */
	if (outp == (void *)SCARG(uap, dent)) {
		if (cookiebuf)
			free(cookiebuf, M_TEMP);
		cookiebuf = NULL;
		goto again;
	}
	fp->f_offset = off;	/* update the vnode offset */

eof:
	*retval = nbytes - resid;
out:
	VOP_UNLOCK(vp, 0);
	if (cookiebuf)
		free(cookiebuf, M_TEMP);
	free(tbuf, M_TEMP);
out1:
	fd_putfile(SCARG(uap, fd));
	return error;
}
/*
 * Obtain a dquot structure for the specified identifier and quota file,
 * reading the information from the file if necessary.
 */
static int
dqget(struct vnode *vp, u_long id, struct ufsmount *ump, int type,
    struct dquot **dqp)
{
	struct dquot *dq, *ndq;
	struct dqhashhead *dqh;
	struct vnode *dqvp;
	struct iovec aiov;
	struct uio auio;
	int error;

	/* Lock to see an up to date value for QTF_CLOSING. */
	mutex_enter(&dqlock);
	dqvp = ump->um_quotas[type];
	if (dqvp == NULLVP || (ump->um_qflags[type] & QTF_CLOSING)) {
		mutex_exit(&dqlock);
		*dqp = NODQUOT;
		return (EINVAL);
	}
	KASSERT(dqvp != vp);
	/*
	 * Check the cache first.
	 */
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		*dqp = dq;
		return (0);
	}
	/*
	 * Not in cache, allocate a new one.
	 */
	mutex_exit(&dqlock);
	ndq = pool_cache_get(dquot_cache, PR_WAITOK);
	/*
	 * Initialize the contents of the dquot structure.
	 */
	memset((char *)ndq, 0, sizeof *ndq);
	ndq->dq_flags = 0;
	ndq->dq_id = id;
	ndq->dq_ump = ump;
	ndq->dq_type = type;
	mutex_init(&ndq->dq_interlock, MUTEX_DEFAULT, IPL_NONE);
	mutex_enter(&dqlock);
	dqh = &dqhashtbl[DQHASH(dqvp, id)];
	LIST_FOREACH(dq, dqh, dq_hash) {
		if (dq->dq_id != id ||
		    dq->dq_ump->um_quotas[dq->dq_type] != dqvp)
			continue;
		/*
		 * Another thread beat us allocating this dquot.
		 */
		KASSERT(dq->dq_cnt > 0);
		dqref(dq);
		mutex_exit(&dqlock);
		mutex_destroy(&ndq->dq_interlock);
		pool_cache_put(dquot_cache, ndq);
		*dqp = dq;
		return 0;
	}
	dq = ndq;
	LIST_INSERT_HEAD(dqh, dq, dq_hash);
	dqref(dq);
	mutex_enter(&dq->dq_interlock);
	mutex_exit(&dqlock);
	vn_lock(dqvp, LK_EXCLUSIVE | LK_RETRY);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_base = (void *)&dq->dq_dqb;
	aiov.iov_len = sizeof (struct dqblk);
	auio.uio_resid = sizeof (struct dqblk);
	auio.uio_offset = (off_t)(id * sizeof (struct dqblk));
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = VOP_READ(dqvp, &auio, 0, ump->um_cred[type]);
	if (auio.uio_resid == sizeof(struct dqblk) && error == 0)
		memset((void *)&dq->dq_dqb, 0, sizeof(struct dqblk));
	VOP_UNLOCK(dqvp, 0);
	/*
	 * I/O error in reading quota file, release
	 * quota structure and reflect problem to caller.
	 */
	if (error) {
		mutex_enter(&dqlock);
		LIST_REMOVE(dq, dq_hash);
		mutex_exit(&dqlock);
		mutex_exit(&dq->dq_interlock);
		dqrele(vp, dq);
		*dqp = NODQUOT;
		return (error);
	}
	/*
	 * Check for no limit to enforce.
	 * Initialize time values if necessary.
	 */
	if (dq->dq_isoftlimit == 0 && dq->dq_bsoftlimit == 0 &&
	    dq->dq_ihardlimit == 0 && dq->dq_bhardlimit == 0)
		dq->dq_flags |= DQ_FAKE;
	if (dq->dq_id != 0) {
		if (dq->dq_btime == 0)
			dq->dq_btime = time_second + ump->um_btime[type];
		if (dq->dq_itime == 0)
			dq->dq_itime = time_second + ump->um_itime[type];
	}
	mutex_exit(&dq->dq_interlock);
	*dqp = dq;
	return (0);
}
/*
 * Something (e.g. another driver) has called us with a periph and a
 * SCSI-specific ioctl to perform, so we had better try.  If it is a
 * user-level command, we must still be running in the context of the
 * calling process.
 */
int
scsipi_do_ioctl(struct scsipi_periph *periph, dev_t dev, u_long cmd,
    void *addr, int flag, struct lwp *l)
{
	int error;

	SC_DEBUG(periph, SCSIPI_DB2, ("scsipi_do_ioctl(0x%lx)\n", cmd));

	if (addr == NULL)
		return EINVAL;

	/* Check for the safe-ness of this request. */
	switch (cmd) {
	case OSCIOCIDENTIFY:
	case SCIOCIDENTIFY:
		break;
	case SCIOCCOMMAND:
		if ((((scsireq_t *)addr)->flags & SCCMD_READ) == 0 &&
		    (flag & FWRITE) == 0)
			return (EBADF);
		break;
	default:
		if ((flag & FWRITE) == 0)
			return (EBADF);
	}

	switch (cmd) {
	case SCIOCCOMMAND: {
		scsireq_t *screq = (scsireq_t *)addr;
		struct scsi_ioctl *si;
		int len;

		si = si_get();
		si->si_screq = *screq;
		si->si_periph = periph;
		len = screq->datalen;
		if (len) {
			si->si_iov.iov_base = screq->databuf;
			si->si_iov.iov_len = len;
			si->si_uio.uio_iov = &si->si_iov;
			si->si_uio.uio_iovcnt = 1;
			si->si_uio.uio_resid = len;
			si->si_uio.uio_offset = 0;
			si->si_uio.uio_rw =
			    (screq->flags & SCCMD_READ) ?
			    UIO_READ : UIO_WRITE;
			if ((flag & FKIOCTL) == 0) {
				si->si_uio.uio_vmspace = l->l_proc->p_vmspace;
			} else {
				UIO_SETUP_SYSSPACE(&si->si_uio);
			}
			error = physio(scsistrategy, &si->si_bp, dev,
			    (screq->flags & SCCMD_READ) ? B_READ : B_WRITE,
			    periph->periph_channel->chan_adapter->adapt_minphys,
			    &si->si_uio);
		} else {
			/* if no data, no need to translate it.. */
			si->si_bp.b_flags = 0;
			si->si_bp.b_data = 0;
			si->si_bp.b_bcount = 0;
			si->si_bp.b_dev = dev;
			si->si_bp.b_proc = l->l_proc;
			scsistrategy(&si->si_bp);
			error = si->si_bp.b_error;
		}
		*screq = si->si_screq;
		si_free(si);
		return (error);
	}
	case SCIOCDEBUG: {
		int level = *((int *)addr);

		SC_DEBUG(periph, SCSIPI_DB3, ("debug set to %d\n", level));
		periph->periph_dbflags = 0;
		if (level & 1)
			periph->periph_dbflags |= SCSIPI_DB1;
		if (level & 2)
			periph->periph_dbflags |= SCSIPI_DB2;
		if (level & 4)
			periph->periph_dbflags |= SCSIPI_DB3;
		if (level & 8)
			periph->periph_dbflags |= SCSIPI_DB4;
		return (0);
	}
	case SCIOCRECONFIG:
	case SCIOCDECONFIG:
		return (EINVAL);
	case SCIOCIDENTIFY: {
		struct scsi_addr *sca = (struct scsi_addr *)addr;

		switch (scsipi_periph_bustype(periph)) {
		case SCSIPI_BUSTYPE_SCSI:
			sca->type = TYPE_SCSI;
			sca->addr.scsi.scbus =
			    device_unit(device_parent(periph->periph_dev));
			sca->addr.scsi.target = periph->periph_target;
			sca->addr.scsi.lun = periph->periph_lun;
			return (0);
		case SCSIPI_BUSTYPE_ATAPI:
			sca->type = TYPE_ATAPI;
			sca->addr.atapi.atbus =
			    device_unit(device_parent(periph->periph_dev));
			sca->addr.atapi.drive = periph->periph_target;
			return (0);
		}
		return (ENXIO);
	}
#if defined(COMPAT_12) || defined(COMPAT_FREEBSD)
	/* SCIOCIDENTIFY before the ATAPI merge */
	case OSCIOCIDENTIFY: {
		struct oscsi_addr *sca = (struct oscsi_addr *)addr;

		switch (scsipi_periph_bustype(periph)) {
		case SCSIPI_BUSTYPE_SCSI:
			sca->scbus =
			    device_unit(device_parent(periph->periph_dev));
			sca->target = periph->periph_target;
			sca->lun = periph->periph_lun;
			return (0);
		}
		return (ENODEV);
	}
#endif
	default:
		return (ENOTTY);
	}

#ifdef DIAGNOSTIC
	panic("scsipi_do_ioctl: impossible");
#endif
}
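/*
 * Sketch (not part of the original source) of the space decision made
 * in the SCIOCCOMMAND path above: a buffer that arrived from userland
 * via ioctl(2) is described by the caller's vmspace, while an
 * in-kernel caller signals a kernel buffer with FKIOCTL.
 */
static void
example_uio_space(struct uio *uio, struct lwp *l, int flag)
{

	if ((flag & FKIOCTL) == 0)
		uio->uio_vmspace = l->l_proc->p_vmspace; /* user buffer */
	else
		UIO_SETUP_SYSSPACE(uio);		 /* kernel buffer */
}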
/*
 * Vnode op for reading directories.
 *
 * Convert the on-disk entries to <sys/dirent.h> entries.
 * The problem is that the conversion will blow up some entries by four bytes,
 * so it can't be done in place.  This is too bad.  Right now the conversion is
 * done entry by entry, the converted entry is sent via uiomove.
 *
 * XXX allocate a buffer, convert as many entries as possible, then send
 * the whole buffer to uiomove
 */
int
ext2fs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct uio *uio = ap->a_uio;
	int error;
	size_t e2fs_count, readcnt;
	struct vnode *vp = ap->a_vp;
	struct m_ext2fs *fs = VTOI(vp)->i_e2fs;
	struct ext2fs_direct *dp;
	struct dirent *dstd;
	struct uio auio;
	struct iovec aiov;
	void *dirbuf;
	off_t off = uio->uio_offset;
	off_t *cookies = NULL;
	int nc = 0, ncookies = 0;
	int e2d_reclen;

	if (vp->v_type != VDIR)
		return (ENOTDIR);

	e2fs_count = uio->uio_resid;
	/* Make sure we don't return partial entries. */
	e2fs_count -= (uio->uio_offset + e2fs_count) & (fs->e2fs_bsize - 1);
	if (e2fs_count <= 0)
		return (EINVAL);

	auio = *uio;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	aiov.iov_len = e2fs_count;
	auio.uio_resid = e2fs_count;
	UIO_SETUP_SYSSPACE(&auio);
	dirbuf = kmem_alloc(e2fs_count, KM_SLEEP);
	dstd = kmem_zalloc(sizeof(struct dirent), KM_SLEEP);
	if (ap->a_ncookies) {
		nc = e2fs_count / _DIRENT_MINSIZE((struct dirent *)0);
		ncookies = nc;
		cookies = malloc(sizeof (off_t) * ncookies, M_TEMP, M_WAITOK);
		*ap->a_cookies = cookies;
	}
	aiov.iov_base = dirbuf;

	error = UFS_BUFRD(ap->a_vp, &auio, 0, ap->a_cred);
	if (error == 0) {
		readcnt = e2fs_count - auio.uio_resid;
		for (dp = (struct ext2fs_direct *)dirbuf;
		    (char *)dp < (char *)dirbuf + readcnt; ) {
			e2d_reclen = fs2h16(dp->e2d_reclen);
			if (e2d_reclen == 0) {
				error = EIO;
				break;
			}
			ext2fs_dirconv2ffs(dp, dstd);
			if (dstd->d_reclen > uio->uio_resid) {
				break;
			}
			error = uiomove(dstd, dstd->d_reclen, uio);
			if (error != 0) {
				break;
			}
			off = off + e2d_reclen;
			if (cookies != NULL) {
				*cookies++ = off;
				if (--ncookies <= 0) {
					break;	/* out of cookies */
				}
			}
			/* advance dp */
			dp = (struct ext2fs_direct *)((char *)dp + e2d_reclen);
		}
		/* we need to correct uio_offset */
		uio->uio_offset = off;
	}
	kmem_free(dirbuf, e2fs_count);
	kmem_free(dstd, sizeof(*dstd));
	*ap->a_eofflag = ext2fs_size(VTOI(ap->a_vp)) <= uio->uio_offset;
	if (ap->a_ncookies) {
		if (error) {
			free(*ap->a_cookies, M_TEMP);
			*ap->a_ncookies = 0;
			*ap->a_cookies = NULL;
		} else
			*ap->a_ncookies = nc - ncookies;
	}
	return (error);
}
/*
 * Write a directory entry after a call to namei, using the parameters
 * that it left in nameidata.  The argument ip is the inode which the new
 * directory entry will refer to.  Dvp is a pointer to the directory to
 * be written, which was left locked by namei.  Remaining parameters
 * (ulr_offset, ulr_count) indicate how the space for the new
 * entry is to be obtained.
 */
int
ext2fs_direnter(struct inode *ip, struct vnode *dvp,
    const struct ufs_lookup_results *ulr, struct componentname *cnp)
{
	struct ext2fs_direct *ep, *nep;
	struct inode *dp;
	struct buf *bp;
	struct ext2fs_direct newdir;
	struct iovec aiov;
	struct uio auio;
	u_int dsize;
	int error, loc, newentrysize, spacefree;
	char *dirbuf;
	struct ufsmount *ump = VFSTOUFS(dvp->v_mount);
	int dirblksiz = ump->um_dirblksiz;

	dp = VTOI(dvp);

	newdir.e2d_ino = h2fs32(ip->i_number);
	newdir.e2d_namlen = cnp->cn_namelen;
	if (ip->i_e2fs->e2fs.e2fs_rev > E2FS_REV0 &&
	    (ip->i_e2fs->e2fs.e2fs_features_incompat & EXT2F_INCOMPAT_FTYPE)) {
		newdir.e2d_type = inot2ext2dt(IFTODT(ip->i_e2fs_mode));
	} else {
		newdir.e2d_type = 0;
	}
	memcpy(newdir.e2d_name, cnp->cn_nameptr, (unsigned)cnp->cn_namelen + 1);
	newentrysize = EXT2FS_DIRSIZ(cnp->cn_namelen);
	if (ulr->ulr_count == 0) {
		/*
		 * If ulr_count is 0, then namei could find no
		 * space in the directory.  Here, ulr_offset will
		 * be on a directory block boundary and we will write the
		 * new entry into a fresh block.
		 */
		if (ulr->ulr_offset & (dirblksiz - 1))
			panic("ext2fs_direnter: newblk");
		auio.uio_offset = ulr->ulr_offset;
		newdir.e2d_reclen = h2fs16(dirblksiz);
		auio.uio_resid = newentrysize;
		aiov.iov_len = newentrysize;
		aiov.iov_base = (void *)&newdir;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_rw = UIO_WRITE;
		UIO_SETUP_SYSSPACE(&auio);
		error = VOP_WRITE(dvp, &auio, IO_SYNC, cnp->cn_cred);
		if (dirblksiz > dvp->v_mount->mnt_stat.f_bsize)
			/* XXX should grow with balloc() */
			panic("ext2fs_direnter: frag size");
		else if (!error) {
			error = ext2fs_setsize(dp,
			    roundup(ext2fs_size(dp), dirblksiz));
			if (error)
				return (error);
			dp->i_flag |= IN_CHANGE;
			uvm_vnp_setsize(dvp, ext2fs_size(dp));
		}
		return (error);
	}

	/*
	 * If ulr_count is non-zero, then namei found space
	 * for the new entry in the range ulr_offset to
	 * ulr_offset + ulr_count in the directory.
	 * To use this space, we may have to compact the entries located
	 * there, by copying them together towards the beginning of the
	 * block, leaving the free space in one usable chunk at the end.
	 */

	/*
	 * Get the block containing the space for the new directory entry.
	 */
	if ((error = ext2fs_blkatoff(dvp, (off_t)ulr->ulr_offset, &dirbuf,
	    &bp)) != 0)
		return (error);
	/*
	 * Find space for the new entry.  In the simple case, the entry at
	 * offset base will have the space.  If it does not, then namei
	 * arranged that compacting the region ulr_offset to
	 * ulr_offset + ulr_count would yield the space.
	 */
	ep = (struct ext2fs_direct *)dirbuf;
	dsize = EXT2FS_DIRSIZ(ep->e2d_namlen);
	spacefree = fs2h16(ep->e2d_reclen) - dsize;
	for (loc = fs2h16(ep->e2d_reclen); loc < ulr->ulr_count; ) {
		nep = (struct ext2fs_direct *)(dirbuf + loc);
		if (ep->e2d_ino) {
			/* trim the existing slot */
			ep->e2d_reclen = h2fs16(dsize);
			ep = (struct ext2fs_direct *)((char *)ep + dsize);
		} else {
			/* overwrite; nothing there; header is ours */
			spacefree += dsize;
		}
		dsize = EXT2FS_DIRSIZ(nep->e2d_namlen);
		spacefree += fs2h16(nep->e2d_reclen) - dsize;
		loc += fs2h16(nep->e2d_reclen);
		memcpy((void *)ep, (void *)nep, dsize);
	}
	/*
	 * Update the pointer fields in the previous entry (if any),
	 * copy in the new entry, and write out the block.
	 */
	if (ep->e2d_ino == 0) {
#ifdef DIAGNOSTIC
		if (spacefree + dsize < newentrysize)
			panic("ext2fs_direnter: compact1");
#endif
		newdir.e2d_reclen = h2fs16(spacefree + dsize);
	} else {
#ifdef DIAGNOSTIC
		if (spacefree < newentrysize) {
			printf("ext2fs_direnter: compact2 %u %u",
			    (u_int)spacefree, (u_int)newentrysize);
			panic("ext2fs_direnter: compact2");
		}
#endif
		newdir.e2d_reclen = h2fs16(spacefree);
		ep->e2d_reclen = h2fs16(dsize);
		ep = (struct ext2fs_direct *)((char *)ep + dsize);
	}
	memcpy((void *)ep, (void *)&newdir, (u_int)newentrysize);
	error = VOP_BWRITE(bp->b_vp, bp);
	dp->i_flag |= IN_CHANGE | IN_UPDATE;
	if (!error && ulr->ulr_endoff && ulr->ulr_endoff < ext2fs_size(dp))
		error = ext2fs_truncate(dvp, (off_t)ulr->ulr_endoff, IO_SYNC,
		    cnp->cn_cred);
	return (error);
}
/*
 * Vnode op for reading directories.
 *
 * This routine handles converting from the on-disk directory format
 * "struct direct" to the in-memory format "struct dirent" as well as
 * byte swapping the entries if necessary.
 */
int
ufs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct direct *cdp, *ecdp;
	struct dirent *ndp;
	char *cdbuf, *ndbuf, *endp;
	struct uio auio, *uio;
	struct iovec aiov;
	int error;
	size_t count, ccount, rcount, cdbufsz, ndbufsz;
	off_t off, *ccp;
	off_t startoff;
	size_t skipbytes;
	struct ufsmount *ump = VFSTOUFS(vp->v_mount);
	int nswap = UFS_MPNEEDSWAP(ump);
#if BYTE_ORDER == LITTLE_ENDIAN
	int needswap = ump->um_maxsymlinklen <= 0 && nswap == 0;
#else
	int needswap = ump->um_maxsymlinklen <= 0 && nswap != 0;
#endif

	uio = ap->a_uio;
	count = uio->uio_resid;
	rcount = count - ((uio->uio_offset + count) & (ump->um_dirblksiz - 1));

	if (rcount < _DIRENT_MINSIZE(cdp) || count < _DIRENT_MINSIZE(ndp))
		return EINVAL;

	startoff = uio->uio_offset & ~(ump->um_dirblksiz - 1);
	skipbytes = uio->uio_offset - startoff;
	rcount += skipbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = startoff;
	auio.uio_resid = rcount;
	UIO_SETUP_SYSSPACE(&auio);
	auio.uio_rw = UIO_READ;
	cdbufsz = rcount;
	cdbuf = kmem_alloc(cdbufsz, KM_SLEEP);
	aiov.iov_base = cdbuf;
	aiov.iov_len = rcount;
	error = VOP_READ(vp, &auio, 0, ap->a_cred);
	if (error != 0) {
		kmem_free(cdbuf, cdbufsz);
		return error;
	}

	rcount -= auio.uio_resid;

	cdp = (struct direct *)(void *)cdbuf;
	ecdp = (struct direct *)(void *)&cdbuf[rcount];

	ndbufsz = count;
	ndbuf = kmem_alloc(ndbufsz, KM_SLEEP);
	ndp = (struct dirent *)(void *)ndbuf;
	endp = &ndbuf[count];

	off = uio->uio_offset;
	if (ap->a_cookies) {
		ccount = rcount / _DIRENT_RECLEN(cdp, 1);
		ccp = *(ap->a_cookies) = malloc(ccount * sizeof(*ccp),
		    M_TEMP, M_WAITOK);
	} else {
		/* XXX: GCC */
		ccount = 0;
		ccp = NULL;
	}

	while (cdp < ecdp) {
		cdp->d_reclen = ufs_rw16(cdp->d_reclen, nswap);
		if (skipbytes > 0) {
			if (cdp->d_reclen <= skipbytes) {
				skipbytes -= cdp->d_reclen;
				cdp = _DIRENT_NEXT(cdp);
				continue;
			}
			/*
			 * invalid cookie.
			 */
			error = EINVAL;
			goto out;
		}
		if (cdp->d_reclen == 0) {
			struct dirent *ondp = ndp;
			ndp->d_reclen = _DIRENT_MINSIZE(ndp);
			ndp = _DIRENT_NEXT(ndp);
			ondp->d_reclen = 0;
			cdp = ecdp;
			break;
		}
		if (needswap) {
			ndp->d_type = cdp->d_namlen;
			ndp->d_namlen = cdp->d_type;
		} else {
			ndp->d_type = cdp->d_type;
			ndp->d_namlen = cdp->d_namlen;
		}
		ndp->d_reclen = _DIRENT_RECLEN(ndp, ndp->d_namlen);
		if ((char *)(void *)ndp + ndp->d_reclen +
		    _DIRENT_MINSIZE(ndp) > endp)
			break;
		ndp->d_fileno = ufs_rw32(cdp->d_ino, nswap);
		(void)memcpy(ndp->d_name, cdp->d_name, ndp->d_namlen);
		memset(&ndp->d_name[ndp->d_namlen], 0,
		    ndp->d_reclen - _DIRENT_NAMEOFF(ndp) - ndp->d_namlen);
		off += cdp->d_reclen;
		if (ap->a_cookies) {
			KASSERT(ccp - *(ap->a_cookies) < ccount);
			*(ccp++) = off;
		}
		ndp = _DIRENT_NEXT(ndp);
		cdp = _DIRENT_NEXT(cdp);
	}

	count = ((char *)(void *)ndp - ndbuf);
	error = uiomove(ndbuf, count, uio);
out:
	if (ap->a_cookies) {
		if (error) {
			free(*(ap->a_cookies), M_TEMP);
			*(ap->a_cookies) = NULL;
			*(ap->a_ncookies) = 0;
		} else {
			*ap->a_ncookies = ccp - *(ap->a_cookies);
		}
	}
	uio->uio_offset = off;
	kmem_free(ndbuf, ndbufsz);
	kmem_free(cdbuf, cdbufsz);
	*ap->a_eofflag = VTOI(vp)->i_size <= uio->uio_offset;
	return error;
}
/* compressed file read */
static void
compstrategy(struct buf *bp, off_t bn)
{
	int error;
	int unit = vndunit(bp->b_dev);
	struct vnd_softc *vnd = device_lookup_private(&vnd_cd, unit);
	u_int32_t comp_block;
	struct uio auio;
	char *addr;
	int s;

	/* set up constants for data move */
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);

	/* read, and transfer the data */
	addr = bp->b_data;
	bp->b_resid = bp->b_bcount;

	s = splbio();
	while (bp->b_resid > 0) {
		unsigned length;
		size_t length_in_buffer;
		u_int32_t offset_in_buffer;
		struct iovec aiov;

		/* calculate the compressed block number */
		comp_block = bn / (off_t)vnd->sc_comp_blksz;

		/* check for good block number */
		if (comp_block >= vnd->sc_comp_numoffs) {
			bp->b_error = EINVAL;
			splx(s);
			return;
		}

		/* read in the compressed block, if not in buffer */
		if (comp_block != vnd->sc_comp_buffblk) {
			length = vnd->sc_comp_offsets[comp_block + 1] -
			    vnd->sc_comp_offsets[comp_block];
			vn_lock(vnd->sc_vp, LK_EXCLUSIVE | LK_RETRY);
			error = vn_rdwr(UIO_READ, vnd->sc_vp,
			    vnd->sc_comp_buff, length,
			    vnd->sc_comp_offsets[comp_block],
			    UIO_SYSSPACE, IO_NODELOCKED|IO_UNIT,
			    vnd->sc_cred, NULL, NULL);
			if (error) {
				bp->b_error = error;
				VOP_UNLOCK(vnd->sc_vp);
				splx(s);
				return;
			}
			/* uncompress the buffer */
			vnd->sc_comp_stream.next_in = vnd->sc_comp_buff;
			vnd->sc_comp_stream.avail_in = length;
			vnd->sc_comp_stream.next_out = vnd->sc_comp_decombuf;
			vnd->sc_comp_stream.avail_out = vnd->sc_comp_blksz;
			inflateReset(&vnd->sc_comp_stream);
			error = inflate(&vnd->sc_comp_stream, Z_FINISH);
			if (error != Z_STREAM_END) {
				if (vnd->sc_comp_stream.msg)
					aprint_normal_dev(vnd->sc_dev,
					    "compressed file, %s\n",
					    vnd->sc_comp_stream.msg);
				bp->b_error = EBADMSG;
				VOP_UNLOCK(vnd->sc_vp);
				splx(s);
				return;
			}
			vnd->sc_comp_buffblk = comp_block;
			VOP_UNLOCK(vnd->sc_vp);
		}

		/* transfer the usable uncompressed data */
		offset_in_buffer = bn % (off_t)vnd->sc_comp_blksz;
		length_in_buffer = vnd->sc_comp_blksz - offset_in_buffer;
		if (length_in_buffer > bp->b_resid)
			length_in_buffer = bp->b_resid;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		aiov.iov_base = addr;
		aiov.iov_len = length_in_buffer;
		auio.uio_resid = aiov.iov_len;
		auio.uio_offset = 0;
		error = uiomove(vnd->sc_comp_decombuf + offset_in_buffer,
		    length_in_buffer, &auio);
		if (error) {
			bp->b_error = error;
			splx(s);
			return;
		}

		bn += length_in_buffer;
		addr += length_in_buffer;
		bp->b_resid -= length_in_buffer;
	}
	splx(s);
}
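/*
 * Usage sketch (not part of the original source): the uiomove() call
 * above follows the same pattern a character device read routine uses
 * to copy out of a kernel buffer, except that there the uio comes from
 * the caller and already carries the file offset.  The softc, its
 * fields, and example_cd are all hypothetical.
 */
struct example_softc {
	char	*sc_buf;	/* backing store */
	size_t	 sc_len;	/* valid bytes in sc_buf */
};
extern struct cfdriver example_cd;	/* hypothetical autoconf glue */

static int
example_read(dev_t dev, struct uio *uio, int flags)
{
	struct example_softc *sc =
	    device_lookup_private(&example_cd, minor(dev));
	size_t len;

	if (uio->uio_offset < 0 || (size_t)uio->uio_offset > sc->sc_len)
		return EINVAL;
	len = MIN(uio->uio_resid, sc->sc_len - uio->uio_offset);
	/* uiomove() advances uio_offset and decrements uio_resid. */
	return uiomove(sc->sc_buf + uio->uio_offset, len, uio);
}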
/*
 * Read a block of directory entries in a file system independent format.
 */
int
compat_43_sys_getdirentries(struct lwp *l,
    const struct compat_43_sys_getdirentries_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(char *) buf;
		syscallarg(u_int) count;
		syscallarg(long *) basep;
	} */
	struct dirent *bdp;
	struct vnode *vp;
	char *inp, *tbuf;		/* Current-format */
	int len, reclen;		/* Current-format */
	char *outp;			/* Dirent12-format */
	int resid, old_reclen = 0;	/* Dirent12-format */
	struct file *fp;
	struct uio auio;
	struct iovec aiov;
	struct dirent43 idb;
	off_t off;			/* true file offset */
	int buflen, error, eofflag, nbytes;
	struct vattr va;
	off_t *cookiebuf = NULL, *cookie;
	int ncookies;
	long loff;

	/* fd_getvnode() will use the descriptor for us */
	if ((error = fd_getvnode(SCARG(uap, fd), &fp)) != 0)
		return (error);

	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out1;
	}

	vp = (struct vnode *)fp->f_data;
	if (vp->v_type != VDIR) {
		error = ENOTDIR;
		goto out1;
	}

	vn_lock(vp, LK_SHARED | LK_RETRY);
	error = VOP_GETATTR(vp, &va, l->l_cred);
	VOP_UNLOCK(vp);
	if (error)
		goto out1;

	loff = fp->f_offset;
	nbytes = SCARG(uap, count);
	buflen = min(MAXBSIZE, nbytes);
	if (buflen < va.va_blocksize)
		buflen = va.va_blocksize;
	tbuf = malloc(buflen, M_TEMP, M_WAITOK);

	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	off = fp->f_offset;
again:
	aiov.iov_base = tbuf;
	aiov.iov_len = buflen;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = buflen;
	auio.uio_offset = off;
	UIO_SETUP_SYSSPACE(&auio);
	/*
	 * First we read into the malloc'ed buffer, then
	 * we massage it into user space, one record at a time.
	 */
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, &cookiebuf,
	    &ncookies);
	if (error)
		goto out;

	inp = tbuf;
	outp = SCARG(uap, buf);
	resid = nbytes;
	if ((len = buflen - auio.uio_resid) == 0)
		goto eof;

	for (cookie = cookiebuf; len > 0; len -= reclen) {
		bdp = (struct dirent *)inp;
		reclen = bdp->d_reclen;
		if (reclen & 3)
			panic(__func__);
		if (bdp->d_fileno == 0) {
			inp += reclen;	/* it is a hole; squish it out */
			if (cookie)
				off = *cookie++;
			else
				off += reclen;
			continue;
		}
		old_reclen = _DIRENT_RECLEN(&idb, bdp->d_namlen);
		if (reclen > len || resid < old_reclen) {
			/* entry too big for buffer, so just stop */
			outp++;
			break;
		}
		/*
		 * Massage in place to make a Dirent12-shaped dirent (otherwise
		 * we have to worry about touching user memory outside of
		 * the copyout() call).
		 */
		idb.d_fileno = (uint32_t)bdp->d_fileno;
		idb.d_reclen = (uint16_t)old_reclen;
		idb.d_namlen = (uint16_t)bdp->d_namlen;
		strcpy(idb.d_name, bdp->d_name);
		if ((error = copyout(&idb, outp, old_reclen)))
			goto out;
		/* advance past this real entry */
		inp += reclen;
		if (cookie)
			off = *cookie++;	/* each entry points to itself */
		else
			off += reclen;
		/* advance output past Dirent12-shaped entry */
		outp += old_reclen;
		resid -= old_reclen;
	}

	/* if we squished out the whole block, try again */
	if (outp == SCARG(uap, buf)) {
		if (cookiebuf)
			free(cookiebuf, M_TEMP);
		cookiebuf = NULL;
		goto again;
	}
	fp->f_offset = off;	/* update the vnode offset */

eof:
	*retval = nbytes - resid;
out:
	VOP_UNLOCK(vp);
	if (cookiebuf)
		free(cookiebuf, M_TEMP);
	free(tbuf, M_TEMP);
out1:
	fd_putfile(SCARG(uap, fd));
	if (error)
		return error;
	return copyout(&loff, SCARG(uap, basep), sizeof(long));
}
/*
 * Process debugging system call.
 */
int
sys_ptrace(struct lwp *l, const struct sys_ptrace_args *uap,
    register_t *retval)
{
	/* {
		syscallarg(int) req;
		syscallarg(pid_t) pid;
		syscallarg(void *) addr;
		syscallarg(int) data;
	} */
	struct proc *p = l->l_proc;
	struct lwp *lt;
	struct proc *t;				/* target process */
	struct uio uio;
	struct iovec iov;
	struct ptrace_io_desc piod;
	struct ptrace_lwpinfo pl;
	struct vmspace *vm;
	int error, write, tmp, req, pheld;
	int signo;
	ksiginfo_t ksi;
#ifdef COREDUMP
	char *path;
#endif

	error = 0;
	req = SCARG(uap, req);

	/*
	 * If attaching or detaching, we need to get a write hold on the
	 * proclist lock so that we can re-parent the target process.
	 */
	mutex_enter(proc_lock);

	/* "A foolish consistency..." XXX */
	if (req == PT_TRACE_ME) {
		t = p;
		mutex_enter(t->p_lock);
	} else {
		/* Find the process we're supposed to be operating on. */
		if ((t = p_find(SCARG(uap, pid), PFIND_LOCKED)) == NULL) {
			mutex_exit(proc_lock);
			return (ESRCH);
		}

		/* XXX-elad */
		mutex_enter(t->p_lock);
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_CANSEE, t,
		    KAUTH_ARG(KAUTH_REQ_PROCESS_CANSEE_ENTRY), NULL, NULL);
		if (error) {
			mutex_exit(proc_lock);
			mutex_exit(t->p_lock);
			return (ESRCH);
		}
	}

	/*
	 * Grab a reference on the process to prevent it from execing or
	 * exiting.
	 */
	if (!rw_tryenter(&t->p_reflock, RW_READER)) {
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		return EBUSY;
	}

	/* Make sure we can operate on it. */
	switch (req) {
	case PT_TRACE_ME:
		/* Saying that you're being traced is always legal. */
		break;

	case PT_ATTACH:
		/*
		 * You can't attach to a process if:
		 *	(1) it's the process that's doing the attaching,
		 */
		if (t->p_pid == p->p_pid) {
			error = EINVAL;
			break;
		}

		/*
		 *	(2) it's a system process,
		 */
		if (t->p_flag & PK_SYSTEM) {
			error = EPERM;
			break;
		}

		/*
		 *	(3) it's already being traced, or
		 */
		if (ISSET(t->p_slflag, PSL_TRACED)) {
			error = EBUSY;
			break;
		}

		/*
		 *	(4) the tracer is chrooted, and its root directory is
		 *	    not at or above the root directory of the tracee
		 */
		mutex_exit(t->p_lock);	/* XXXSMP */
		tmp = proc_isunder(t, l);
		mutex_enter(t->p_lock);	/* XXXSMP */
		if (!tmp) {
			error = EPERM;
			break;
		}
		break;

	case PT_READ_I:
	case PT_READ_D:
	case PT_WRITE_I:
	case PT_WRITE_D:
	case PT_IO:
#ifdef PT_GETREGS
	case PT_GETREGS:
#endif
#ifdef PT_SETREGS
	case PT_SETREGS:
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
#endif
#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
#endif
#ifdef __HAVE_PTRACE_MACHDEP
	PTRACE_MACHDEP_REQUEST_CASES
#endif
		/*
		 * You can't read/write the memory or registers of a process
		 * if the tracer is chrooted, and its root directory is not at
		 * or above the root directory of the tracee.
		 */
		mutex_exit(t->p_lock);	/* XXXSMP */
		tmp = proc_isunder(t, l);
		mutex_enter(t->p_lock);	/* XXXSMP */
		if (!tmp) {
			error = EPERM;
			break;
		}
		/*FALLTHROUGH*/

	case PT_CONTINUE:
	case PT_KILL:
	case PT_DETACH:
	case PT_LWPINFO:
	case PT_SYSCALL:
#ifdef COREDUMP
	case PT_DUMPCORE:
#endif
#ifdef PT_STEP
	case PT_STEP:
#endif
		/*
		 * You can't do what you want to the process if:
		 *	(1) It's not being traced at all,
		 */
		if (!ISSET(t->p_slflag, PSL_TRACED)) {
			error = EPERM;
			break;
		}

		/*
		 *	(2) it's being traced by procfs (which has
		 *	    different signal delivery semantics),
		 */
		if (ISSET(t->p_slflag, PSL_FSTRACE)) {
			uprintf("file system traced\n");
			error = EBUSY;
			break;
		}

		/*
		 *	(3) it's not being traced by _you_, or
		 */
		if (t->p_pptr != p) {
			uprintf("parent %d != %d\n", t->p_pptr->p_pid,
			    p->p_pid);
			error = EBUSY;
			break;
		}

		/*
		 *	(4) it's not currently stopped.
		 */
		if (t->p_stat != SSTOP || !t->p_waited /* XXXSMP */) {
			uprintf("stat %d flag %d\n", t->p_stat,
			    !t->p_waited);
			error = EBUSY;
			break;
		}
		break;

	default:
		/* It was not a legal request. */
		error = EINVAL;
		break;
	}

	if (error == 0)
		error = kauth_authorize_process(l->l_cred,
		    KAUTH_PROCESS_PTRACE, t, KAUTH_ARG(req),
		    NULL, NULL);

	if (error != 0) {
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		rw_exit(&t->p_reflock);
		return error;
	}

	/* Do single-step fixup if needed. */
	FIX_SSTEP(t);

	/*
	 * XXX NJWLWP
	 *
	 * The entire ptrace interface needs work to be useful to a
	 * process with multiple LWPs.  For the moment, we'll kluge
	 * this; memory access will be fine, but register access will
	 * be weird.
	 */
	lt = LIST_FIRST(&t->p_lwps);
	KASSERT(lt != NULL);
	lwp_addref(lt);

	/*
	 * Which locks do we need held? XXX Ugly.
	 */
	switch (req) {
#ifdef PT_STEP
	case PT_STEP:
#endif
	case PT_CONTINUE:
	case PT_DETACH:
	case PT_KILL:
	case PT_SYSCALL:
	case PT_ATTACH:
	case PT_TRACE_ME:
		pheld = 1;
		break;
	default:
		mutex_exit(proc_lock);
		mutex_exit(t->p_lock);
		pheld = 0;
		break;
	}

	/* Now do the operation. */
	write = 0;
	*retval = 0;
	tmp = 0;

	switch (req) {
	case PT_TRACE_ME:
		/* Just set the trace flag. */
		SET(t->p_slflag, PSL_TRACED);
		t->p_opptr = t->p_pptr;
		break;

	case PT_WRITE_I:	/* XXX no separate I and D spaces */
	case PT_WRITE_D:
#if defined(__HAVE_RAS)
		/*
		 * Can't write to a RAS
		 */
		if (ras_lookup(t, SCARG(uap, addr)) != (void *)-1) {
			error = EACCES;
			break;
		}
#endif
		write = 1;
		tmp = SCARG(uap, data);
		/* FALLTHROUGH */

	case PT_READ_I:		/* XXX no separate I and D spaces */
	case PT_READ_D:
		/* write = 0 done above. */
		iov.iov_base = (void *)&tmp;
		iov.iov_len = sizeof(tmp);
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(unsigned long)SCARG(uap, addr);
		uio.uio_resid = sizeof(tmp);
		uio.uio_rw = write ? UIO_WRITE : UIO_READ;
		UIO_SETUP_SYSSPACE(&uio);

		error = process_domem(l, lt, &uio);
		if (!write)
			*retval = tmp;
		break;

	case PT_IO:
		error = copyin(SCARG(uap, addr), &piod, sizeof(piod));
		if (error)
			break;
		switch (piod.piod_op) {
		case PIOD_READ_D:
		case PIOD_READ_I:
			uio.uio_rw = UIO_READ;
			break;
		case PIOD_WRITE_D:
		case PIOD_WRITE_I:
			/*
			 * Can't write to a RAS
			 */
			if (ras_lookup(t, SCARG(uap, addr)) != (void *)-1) {
				return (EACCES);
			}
			uio.uio_rw = UIO_WRITE;
			break;
		default:
			error = EINVAL;
			break;
		}
		if (error)
			break;
		error = proc_vmspace_getref(l->l_proc, &vm);
		if (error)
			break;
		iov.iov_base = piod.piod_addr;
		iov.iov_len = piod.piod_len;
		uio.uio_iov = &iov;
		uio.uio_iovcnt = 1;
		uio.uio_offset = (off_t)(unsigned long)piod.piod_offs;
		uio.uio_resid = piod.piod_len;
		uio.uio_vmspace = vm;

		error = process_domem(l, lt, &uio);
		piod.piod_len -= uio.uio_resid;
		(void) copyout(&piod, SCARG(uap, addr), sizeof(piod));
		uvmspace_free(vm);
		break;

#ifdef COREDUMP
	case PT_DUMPCORE:
		if ((path = SCARG(uap, addr)) != NULL) {
			char *dst;
			int len = SCARG(uap, data);

			if (len < 0 || len >= MAXPATHLEN) {
				error = EINVAL;
				break;
			}
			dst = malloc(len + 1, M_TEMP, M_WAITOK);
			if ((error = copyin(path, dst, len)) != 0) {
				free(dst, M_TEMP);
				break;
			}
			path = dst;
			path[len] = '\0';
		}
		error = coredump(lt, path);
		if (path)
			free(path, M_TEMP);
		break;
#endif

#ifdef PT_STEP
	case PT_STEP:
		/*
		 * From the 4.4BSD PRM:
		 * "Execution continues as in request PT_CONTINUE; however
		 * as soon as possible after execution of at least one
		 * instruction, execution stops again. [ ... ]"
		 */
#endif
	case PT_CONTINUE:
	case PT_SYSCALL:
	case PT_DETACH:
		if (req == PT_SYSCALL) {
			if (!ISSET(t->p_slflag, PSL_SYSCALL)) {
				SET(t->p_slflag, PSL_SYSCALL);
#ifdef __HAVE_SYSCALL_INTERN
				(*t->p_emul->e_syscall_intern)(t);
#endif
			}
		} else {
			if (ISSET(t->p_slflag, PSL_SYSCALL)) {
				CLR(t->p_slflag, PSL_SYSCALL);
#ifdef __HAVE_SYSCALL_INTERN
				(*t->p_emul->e_syscall_intern)(t);
#endif
			}
		}
		p->p_trace_enabled = trace_is_enabled(p);

		/*
		 * From the 4.4BSD PRM:
		 * "The data argument is taken as a signal number and the
		 * child's execution continues at location addr as if it
		 * incurred that signal.  Normally the signal number will
		 * be either 0 to indicate that the signal that caused the
		 * stop should be ignored, or that value fetched out of
		 * the process's image indicating which signal caused
		 * the stop.  If addr is (int *)1 then execution continues
		 * from where it stopped."
		 */

		/* Check that the data is a valid signal number or zero. */
		if (SCARG(uap, data) < 0 || SCARG(uap, data) >= NSIG) {
			error = EINVAL;
			break;
		}

		uvm_lwp_hold(lt);

		/* If the address parameter is not (int *)1, set the pc. */
		if ((int *)SCARG(uap, addr) != (int *)1)
			if ((error = process_set_pc(lt,
			    SCARG(uap, addr))) != 0) {
				uvm_lwp_rele(lt);
				break;
			}

#ifdef PT_STEP
		/*
		 * Arrange for a single-step, if that's requested and possible.
		 */
		error = process_sstep(lt, req == PT_STEP);
		if (error) {
			uvm_lwp_rele(lt);
			break;
		}
#endif

		uvm_lwp_rele(lt);

		if (req == PT_DETACH) {
			CLR(t->p_slflag, PSL_TRACED|PSL_FSTRACE|PSL_SYSCALL);

			/* give process back to original parent or init */
			if (t->p_opptr != t->p_pptr) {
				struct proc *pp = t->p_opptr;
				proc_reparent(t, pp ? pp : initproc);
			}

			/* not being traced any more */
			t->p_opptr = NULL;
		}

		signo = SCARG(uap, data);
	sendsig:
		/* Finally, deliver the requested signal (or none). */
		if (t->p_stat == SSTOP) {
			/*
			 * Unstop the process.  If it needs to take a
			 * signal, make all efforts to ensure that at
			 * an LWP runs to see it.
			 */
			t->p_xstat = signo;
			proc_unstop(t);
		} else if (signo != 0) {
			KSI_INIT_EMPTY(&ksi);
			ksi.ksi_signo = signo;
			kpsignal2(t, &ksi);
		}
		break;

	case PT_KILL:
		/* just send the process a KILL signal. */
		signo = SIGKILL;
		goto sendsig;	/* in PT_CONTINUE, above. */

	case PT_ATTACH:
		/*
		 * Go ahead and set the trace flag.
		 * Save the old parent (it's reset in
		 *   _DETACH, and also in kern_exit.c:wait4()).
		 * Reparent the process so that the tracing
		 *   proc gets to see all the action.
		 * Stop the target.
		 */
		t->p_opptr = t->p_pptr;
		if (t->p_pptr != p) {
			struct proc *parent = t->p_pptr;

			if (parent->p_lock < t->p_lock) {
				if (!mutex_tryenter(parent->p_lock)) {
					mutex_exit(t->p_lock);
					mutex_enter(parent->p_lock);
				}
			} else if (parent->p_lock > t->p_lock) {
				mutex_enter(parent->p_lock);
			}
			parent->p_slflag |= PSL_CHTRACED;
			proc_reparent(t, p);
			if (parent->p_lock != t->p_lock)
				mutex_exit(parent->p_lock);
		}
		SET(t->p_slflag, PSL_TRACED);
		signo = SIGSTOP;
		goto sendsig;

	case PT_LWPINFO:
		if (SCARG(uap, data) != sizeof(pl)) {
			error = EINVAL;
			break;
		}
		error = copyin(SCARG(uap, addr), &pl, sizeof(pl));
		if (error)
			break;
		tmp = pl.pl_lwpid;
		lwp_delref(lt);
		mutex_enter(t->p_lock);
		if (tmp == 0)
			lt = LIST_FIRST(&t->p_lwps);
		else {
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lt = LIST_NEXT(lt, l_sibling);
		}
		while (lt != NULL && lt->l_stat == LSZOMB)
			lt = LIST_NEXT(lt, l_sibling);
		pl.pl_lwpid = 0;
		pl.pl_event = 0;
		if (lt) {
			lwp_addref(lt);
			pl.pl_lwpid = lt->l_lid;
			if (lt->l_lid == t->p_sigctx.ps_lwp)
				pl.pl_event = PL_EVENT_SIGNAL;
		}
		mutex_exit(t->p_lock);

		error = copyout(&pl, SCARG(uap, addr), sizeof(pl));
		break;

#ifdef PT_SETREGS
	case PT_SETREGS:
		write = 1;
#endif
#ifdef PT_GETREGS
	case PT_GETREGS:
		/* write = 0 done above. */
#endif
#if defined(PT_SETREGS) || defined(PT_GETREGS)
		tmp = SCARG(uap, data);
		if (tmp != 0 && t->p_nlwps > 1) {
			lwp_delref(lt);
			mutex_enter(t->p_lock);
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lwp_addref(lt);
			mutex_exit(t->p_lock);
		}
		if (!process_validregs(lt))
			error = EINVAL;
		else {
			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error)
				break;
			iov.iov_base = SCARG(uap, addr);
			iov.iov_len = sizeof(struct reg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct reg);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;

			error = process_doregs(l, lt, &uio);
			uvmspace_free(vm);
		}
		break;
#endif

#ifdef PT_SETFPREGS
	case PT_SETFPREGS:
		write = 1;
#endif
#ifdef PT_GETFPREGS
	case PT_GETFPREGS:
		/* write = 0 done above. */
#endif
#if defined(PT_SETFPREGS) || defined(PT_GETFPREGS)
		tmp = SCARG(uap, data);
		if (tmp != 0 && t->p_nlwps > 1) {
			lwp_delref(lt);
			mutex_enter(t->p_lock);
			lt = lwp_find(t, tmp);
			if (lt == NULL) {
				mutex_exit(t->p_lock);
				error = ESRCH;
				break;
			}
			lwp_addref(lt);
			mutex_exit(t->p_lock);
		}
		if (!process_validfpregs(lt))
			error = EINVAL;
		else {
			error = proc_vmspace_getref(l->l_proc, &vm);
			if (error)
				break;
			iov.iov_base = SCARG(uap, addr);
			iov.iov_len = sizeof(struct fpreg);
			uio.uio_iov = &iov;
			uio.uio_iovcnt = 1;
			uio.uio_offset = 0;
			uio.uio_resid = sizeof(struct fpreg);
			uio.uio_rw = write ? UIO_WRITE : UIO_READ;
			uio.uio_vmspace = vm;

			error = process_dofpregs(l, lt, &uio);
			uvmspace_free(vm);
		}
		break;
#endif

#ifdef __HAVE_PTRACE_MACHDEP
	PTRACE_MACHDEP_REQUEST_CASES
		error = ptrace_machdep_dorequest(l, lt, req,
		    SCARG(uap, addr), SCARG(uap, data));
		break;
#endif
	}

	if (pheld) {
		mutex_exit(t->p_lock);
		mutex_exit(proc_lock);
	}
	if (lt != NULL)
		lwp_delref(lt);
	rw_exit(&t->p_reflock);

	return error;
}
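/*
 * Usage sketch (not part of the original source): how the PT_IO case
 * above is driven from userland.  A struct ptrace_io_desc describes
 * both ends of the transfer; the kernel writes back piod_len with the
 * number of bytes actually moved.  The helper is hypothetical.
 */
#include <sys/types.h>
#include <sys/ptrace.h>

static int
example_peek(pid_t pid, void *tracee_addr, int *valp)
{
	struct ptrace_io_desc pio;

	pio.piod_op = PIOD_READ_D;
	pio.piod_offs = tracee_addr;	/* address in the tracee */
	pio.piod_addr = valp;		/* buffer in the tracer */
	pio.piod_len = sizeof(*valp);
	return ptrace(PT_IO, pid, &pio, 0);
}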
/*
 * Vnode op for reading directories.
 *
 * This routine handles converting from the on-disk directory format
 * "struct lfs_direct" to the in-memory format "struct dirent" as well as
 * byte swapping the entries if necessary.
 */
int
ulfs_readdir(void *v)
{
	struct vop_readdir_args /* {
		struct vnode *a_vp;
		struct uio *a_uio;
		kauth_cred_t a_cred;
		int *a_eofflag;
		off_t **a_cookies;
		int *a_ncookies;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	LFS_DIRHEADER *cdp, *ecdp;
	struct dirent *ndp;
	char *cdbuf, *ndbuf, *endp;
	struct uio auio, *uio;
	struct iovec aiov;
	int error;
	size_t count, ccount, rcount, cdbufsz, ndbufsz;
	off_t off, *ccp;
	off_t startoff;
	size_t skipbytes;
	struct ulfsmount *ump = VFSTOULFS(vp->v_mount);
	struct lfs *fs = ump->um_lfs;

	uio = ap->a_uio;
	count = uio->uio_resid;
	rcount = count - ((uio->uio_offset + count) & (fs->um_dirblksiz - 1));

	if (rcount < LFS_DIRECTSIZ(fs, 0) || count < _DIRENT_MINSIZE(ndp))
		return EINVAL;

	startoff = uio->uio_offset & ~(fs->um_dirblksiz - 1);
	skipbytes = uio->uio_offset - startoff;
	rcount += skipbytes;

	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = startoff;
	auio.uio_resid = rcount;
	UIO_SETUP_SYSSPACE(&auio);
	auio.uio_rw = UIO_READ;
	cdbufsz = rcount;
	cdbuf = kmem_alloc(cdbufsz, KM_SLEEP);
	aiov.iov_base = cdbuf;
	aiov.iov_len = rcount;
	error = VOP_READ(vp, &auio, 0, ap->a_cred);
	if (error != 0) {
		kmem_free(cdbuf, cdbufsz);
		return error;
	}

	rcount -= auio.uio_resid;

	cdp = (LFS_DIRHEADER *)(void *)cdbuf;
	ecdp = (LFS_DIRHEADER *)(void *)&cdbuf[rcount];

	ndbufsz = count;
	ndbuf = kmem_alloc(ndbufsz, KM_SLEEP);
	ndp = (struct dirent *)(void *)ndbuf;
	endp = &ndbuf[count];

	off = uio->uio_offset;
	if (ap->a_cookies) {
		ccount = rcount / _DIRENT_RECLEN(ndp, 1);
		ccp = *(ap->a_cookies) = malloc(ccount * sizeof(*ccp),
		    M_TEMP, M_WAITOK);
	} else {
		/* XXX: GCC */
		ccount = 0;
		ccp = NULL;
	}

	while (cdp < ecdp) {
		if (skipbytes > 0) {
			if (lfs_dir_getreclen(fs, cdp) <= skipbytes) {
				skipbytes -= lfs_dir_getreclen(fs, cdp);
				cdp = LFS_NEXTDIR(fs, cdp);
				continue;
			}
			/*
			 * invalid cookie.
			 */
			error = EINVAL;
			goto out;
		}
		if (lfs_dir_getreclen(fs, cdp) == 0) {
			struct dirent *ondp = ndp;
			ndp->d_reclen = _DIRENT_MINSIZE(ndp);
			ndp = _DIRENT_NEXT(ndp);
			ondp->d_reclen = 0;
			cdp = ecdp;
			break;
		}
		ndp->d_type = lfs_dir_gettype(fs, cdp);
		ndp->d_namlen = lfs_dir_getnamlen(fs, cdp);
		ndp->d_reclen = _DIRENT_RECLEN(ndp, ndp->d_namlen);
		if ((char *)(void *)ndp + ndp->d_reclen +
		    _DIRENT_MINSIZE(ndp) > endp)
			break;
		ndp->d_fileno = lfs_dir_getino(fs, cdp);
		(void)memcpy(ndp->d_name, lfs_dir_nameptr(fs, cdp),
		    ndp->d_namlen);
		memset(&ndp->d_name[ndp->d_namlen], 0,
		    ndp->d_reclen - _DIRENT_NAMEOFF(ndp) - ndp->d_namlen);
		off += lfs_dir_getreclen(fs, cdp);
		if (ap->a_cookies) {
			KASSERT(ccp - *(ap->a_cookies) < ccount);
			*(ccp++) = off;
		}
		ndp = _DIRENT_NEXT(ndp);
		cdp = LFS_NEXTDIR(fs, cdp);
	}

	count = ((char *)(void *)ndp - ndbuf);
	error = uiomove(ndbuf, count, uio);
out:
	if (ap->a_cookies) {
		if (error) {
			free(*(ap->a_cookies), M_TEMP);
			*(ap->a_cookies) = NULL;
			*(ap->a_ncookies) = 0;
		} else {
			*ap->a_ncookies = ccp - *(ap->a_cookies);
		}
	}
	uio->uio_offset = off;
	kmem_free(ndbuf, ndbufsz);
	kmem_free(cdbuf, cdbufsz);
	*ap->a_eofflag = VTOI(vp)->i_size <= uio->uio_offset;
	return error;
}
/*
 * Read a block of directory entries in a file system independent format.
 */
int
compat_43_sys_getdirentries(struct lwp *l,
    const struct compat_43_sys_getdirentries_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(char *) buf;
		syscallarg(u_int) count;
		syscallarg(long *) basep;
	} */
	struct vnode *vp;
	struct file *fp;
	struct uio auio, kuio;
	struct iovec aiov, kiov;
	struct dirent *dp, *edp;
	char *dirbuf;
	size_t count = min(MAXBSIZE, (size_t)SCARG(uap, count));
	int error, eofflag, readcnt;
	long loff;

	/* fd_getvnode() will use the descriptor for us */
	if ((error = fd_getvnode(SCARG(uap, fd), &fp)) != 0)
		return (error);

	if ((fp->f_flag & FREAD) == 0) {
		error = EBADF;
		goto out;
	}

	vp = (struct vnode *)fp->f_data;
unionread:
	if (vp->v_type != VDIR) {
		error = EINVAL;
		goto out;
	}
	aiov.iov_base = SCARG(uap, buf);
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	auio.uio_resid = count;
	KASSERT(l == curlwp);
	auio.uio_vmspace = curproc->p_vmspace;
	vn_lock(vp, LK_EXCLUSIVE | LK_RETRY);
	loff = auio.uio_offset = fp->f_offset;
#if (BYTE_ORDER != LITTLE_ENDIAN)
	if ((vp->v_mount->mnt_iflag & IMNT_DTYPE) == 0) {
		error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag,
		    (off_t **)0, (int *)0);
		fp->f_offset = auio.uio_offset;
	} else
#endif
	{
		kuio = auio;
		kuio.uio_iov = &kiov;
		kiov.iov_len = count;
		dirbuf = malloc(count, M_TEMP, M_WAITOK);
		kiov.iov_base = dirbuf;
		UIO_SETUP_SYSSPACE(&kuio);
		error = VOP_READDIR(vp, &kuio, fp->f_cred, &eofflag,
		    (off_t **)0, (int *)0);
		fp->f_offset = kuio.uio_offset;
		if (error == 0) {
			readcnt = count - kuio.uio_resid;
			edp = (struct dirent *)&dirbuf[readcnt];
			for (dp = (struct dirent *)dirbuf; dp < edp; ) {
#if (BYTE_ORDER == LITTLE_ENDIAN)
				/*
				 * The expected low byte of
				 * dp->d_namlen is our dp->d_type.
				 * The high MBZ byte of dp->d_namlen
				 * is our dp->d_namlen.
				 */
				dp->d_type = dp->d_namlen;
				dp->d_namlen = 0;
#else
				/*
				 * The dp->d_type is the high byte
				 * of the expected dp->d_namlen,
				 * so must be zero'ed.
				 */
				dp->d_type = 0;
#endif
				if (dp->d_reclen > 0) {
					dp = (struct dirent *)
					    ((char *)dp + dp->d_reclen);
				} else {
					error = EIO;
					break;
				}
			}
			if (dp >= edp)
				error = uiomove(dirbuf, readcnt, &auio);
		}
		free(dirbuf, M_TEMP);
	}
	VOP_UNLOCK(vp);
	if (error)
		goto out;
	if ((count == auio.uio_resid) &&
	    (vp->v_vflag & VV_ROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		vref(vp);
		fp->f_data = (void *) vp;
		fp->f_offset = 0;
		vrele(tvp);
		goto unionread;
	}
	error = copyout((void *)&loff, (void *)SCARG(uap, basep),
	    sizeof(long));
	*retval = count - auio.uio_resid;
out:
	fd_putfile(SCARG(uap, fd));
	return (error);
}
void
test_send_pdu(struct proc *p, iscsi_test_send_pdu_parameters_t *par)
{
	static uint8_t pad_bytes[4] = { 0 };
	test_pars_t *tp;
	connection_t *conn;
	pdu_t *pdu;
	uint32_t psize = par->pdu_size;
	void *pdu_ptr = par->pdu_ptr;
	struct uio *uio;
	uint32_t i, pad, dsl, size;
	int s;

	if ((tp = find_test_id(par->test_id)) == NULL) {
		par->status = ISCSI_STATUS_INVALID_ID;
		return;
	}
	if (!psize || pdu_ptr == NULL ||
	    ((par->options & ISCSITEST_SFLAG_UPDATE_FIELDS) &&
	     psize < BHS_SIZE)) {
		par->status = ISCSI_STATUS_PARAMETER_INVALID;
		return;
	}
	if ((conn = tp->connection) == NULL) {
		par->status = ISCSI_STATUS_TEST_INACTIVE;
		return;
	}
	if ((pdu = get_pdu(conn, TRUE)) == NULL) {
		par->status = ISCSI_STATUS_TEST_CONNECTION_CLOSED;
		return;
	}
	DEB(1, ("Test Send PDU, id %d\n", par->test_id));

	if ((par->status = map_databuf(p, &pdu_ptr, psize)) != 0) {
		free_pdu(pdu);
		return;
	}

	i = 1;
	if (!par->options) {
		pdu->io_vec[0].iov_base = pdu_ptr;
		pdu->io_vec[0].iov_len = size = psize;
	} else {
		memcpy(&pdu->pdu, pdu_ptr, BHS_SIZE);

		if (!(pdu->pdu.Opcode & OP_IMMEDIATE))
			conn->session->CmdSN++;
		pdu->pdu.p.command.CmdSN = htonl(conn->session->CmdSN);

		dsl = psize - BHS_SIZE;
		size = BHS_SIZE;
		hton3(dsl, pdu->pdu.DataSegmentLength);

		if (conn->HeaderDigest &&
		    !(par->options & ISCSITEST_SFLAG_NO_HEADER_DIGEST)) {
			pdu->pdu.HeaderDigest = gen_digest(&pdu->pdu, BHS_SIZE);
			size += 4;
		}
		pdu->io_vec[0].iov_base = &pdu->pdu;
		pdu->io_vec[0].iov_len = size;

		if (dsl) {
			pdu->io_vec[1].iov_base = &pdu_ptr[BHS_SIZE];
			pdu->io_vec[1].iov_len = dsl;
			i++;
			size += dsl;

			/* Pad to next multiple of 4 */
			pad = (par->options & ISCSITEST_SFLAG_NO_PADDING) ?
			    0 : size & 0x03;
			if (pad) {
				pad = 4 - pad;
				pdu->io_vec[i].iov_base = pad_bytes;
				pdu->io_vec[i].iov_len = pad;
				i++;
				size += pad;
			}
			if (conn->DataDigest &&
			    !(par->options & ISCSITEST_SFLAG_NO_DATA_DIGEST)) {
				pdu->data_digest =
				    gen_digest_2(&pdu_ptr[BHS_SIZE], dsl,
				    pad_bytes, pad);
				pdu->io_vec[i].iov_base = &pdu->data_digest;
				pdu->io_vec[i].iov_len = 4;
				i++;
				size += 4;
			}
		}
	}
	uio = &pdu->uio;
	uio->uio_iov = pdu->io_vec;
	UIO_SETUP_SYSSPACE(uio);
	uio->uio_rw = UIO_WRITE;
	uio->uio_iovcnt = i;
	uio->uio_resid = size;

	pdu->disp = PDUDISP_SIGNAL;
	pdu->flags = PDUF_BUSY | PDUF_NOUPDATE;

	s = splbio();
	/* Enqueue for sending */
	if (pdu->pdu.Opcode & OP_IMMEDIATE)
		TAILQ_INSERT_HEAD(&conn->pdus_to_send, pdu, send_chain);
	else
		TAILQ_INSERT_TAIL(&conn->pdus_to_send, pdu, send_chain);
	wakeup(&conn->pdus_to_send);
	tsleep(pdu, PINOD, "test_send_pdu", 0);
	splx(s);
	unmap_databuf(p, pdu_ptr, psize);

	par->status = ISCSI_STATUS_SUCCESS;
	if (par->options & ISCSITEST_KILL_CONNECTION)
		kill_connection(conn, ISCSI_STATUS_TEST_CONNECTION_CLOSED,
		    NO_LOGOUT, TRUE);
}
int
vn_readdir(file_t *fp, char *bf, int segflg, u_int count, int *done,
    struct lwp *l, off_t **cookies, int *ncookies)
{
	struct vnode *vp = (struct vnode *)fp->f_data;
	struct iovec aiov;
	struct uio auio;
	int error, eofflag;

	/* Limit the size on any kernel buffers used by VOP_READDIR */
	count = min(MAXBSIZE, count);

unionread:
	if (vp->v_type != VDIR)
		return (EINVAL);
	aiov.iov_base = bf;
	aiov.iov_len = count;
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_rw = UIO_READ;
	if (segflg == UIO_SYSSPACE) {
		UIO_SETUP_SYSSPACE(&auio);
	} else {
		KASSERT(l == curlwp);
		auio.uio_vmspace = l->l_proc->p_vmspace;
	}
	auio.uio_resid = count;
	vn_lock(vp, LK_SHARED | LK_RETRY);
	auio.uio_offset = fp->f_offset;
	error = VOP_READDIR(vp, &auio, fp->f_cred, &eofflag, cookies,
	    ncookies);
	mutex_enter(&fp->f_lock);
	fp->f_offset = auio.uio_offset;
	mutex_exit(&fp->f_lock);
	VOP_UNLOCK(vp);
	if (error)
		return (error);

	if (count == auio.uio_resid && vn_union_readdir_hook) {
		struct vnode *ovp = vp;

		error = (*vn_union_readdir_hook)(&vp, fp, l);
		if (error)
			return (error);
		if (vp != ovp)
			goto unionread;
	}

	if (count == auio.uio_resid && (vp->v_vflag & VV_ROOT) &&
	    (vp->v_mount->mnt_flag & MNT_UNION)) {
		struct vnode *tvp = vp;
		vp = vp->v_mount->mnt_vnodecovered;
		vref(vp);
		mutex_enter(&fp->f_lock);
		fp->f_data = vp;
		fp->f_offset = 0;
		mutex_exit(&fp->f_lock);
		vrele(tvp);
		goto unionread;
	}
	*done = count - auio.uio_resid;
	return error;
}
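/*
 * Usage sketch (not part of the original source): draining a directory
 * through vn_readdir() into a kernel buffer, the way the getdents-style
 * system calls in this collection consume it.  No cookies are
 * requested, and the helper is hypothetical.
 */
static int
example_scan_dir(file_t *fp, struct lwp *l)
{
	char *buf;
	int done, error;

	buf = malloc(MAXBSIZE, M_TEMP, M_WAITOK);
	do {
		error = vn_readdir(fp, buf, UIO_SYSSPACE, MAXBSIZE,
		    &done, l, NULL, NULL);
		/* ... walk the struct dirent records in buf[0..done) ... */
	} while (error == 0 && done != 0);
	free(buf, M_TEMP);
	return error;
}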
/*
 * Code for returning a process's command line arguments.
 */
int
procfs_docmdline(struct lwp *curl, struct proc *p, struct pfsnode *pfs,
    struct uio *uio)
{
	struct ps_strings pss;
	int count, error;
	size_t i, len, xlen, upper_bound;
	struct uio auio;
	struct iovec aiov;
	struct vmspace *vm;
	vaddr_t argv;
	char *arg;

	/* Don't allow writing. */
	if (uio->uio_rw != UIO_READ)
		return (EOPNOTSUPP);

	/*
	 * Allocate a temporary buffer to hold the arguments.
	 */
	arg = malloc(PAGE_SIZE, M_TEMP, M_WAITOK);

	/*
	 * Zombies don't have a stack, so we can't read their psstrings.
	 * System processes also don't have a user stack.  This is what
	 * ps(1) would display.
	 */
	if (P_ZOMBIE(p) || (p->p_flag & PK_SYSTEM) != 0) {
		len = snprintf(arg, PAGE_SIZE, "(%s)", p->p_comm) + 1;
		error = uiomove_frombuf(arg, len, uio);
		free(arg, M_TEMP);
		return (error);
	}

	/*
	 * NOTE: Don't bother doing a process_checkioperm() here
	 * because the psstrings info is available by using ps(1),
	 * so it's not like there's anything to protect here.
	 */

	/*
	 * Lock the process down in memory.
	 */
	if ((error = proc_vmspace_getref(p, &vm)) != 0) {
		free(arg, M_TEMP);
		return (error);
	}

	/*
	 * Read in the ps_strings structure.
	 */
	aiov.iov_base = &pss;
	aiov.iov_len = sizeof(pss);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vaddr_t)p->p_psstr;
	auio.uio_resid = sizeof(pss);
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = uvm_io(&vm->vm_map, &auio);
	if (error)
		goto bad;

	/*
	 * Now read the address of the argument vector.
	 */
	aiov.iov_base = &argv;
	aiov.iov_len = sizeof(argv);
	auio.uio_iov = &aiov;
	auio.uio_iovcnt = 1;
	auio.uio_offset = (vaddr_t)pss.ps_argvstr;
	auio.uio_resid = sizeof(argv);
	auio.uio_rw = UIO_READ;
	UIO_SETUP_SYSSPACE(&auio);
	error = uvm_io(&vm->vm_map, &auio);
	if (error)
		goto bad;

	/*
	 * Now copy in the actual argument vector, one page at a time,
	 * since we don't know how long the vector is (though, we do
	 * know how many NUL-terminated strings are in the vector).
	 */
	len = 0;
	count = pss.ps_nargvstr;
	upper_bound = round_page(uio->uio_offset + uio->uio_resid);
	for (; count && len < upper_bound; len += xlen) {
		aiov.iov_base = arg;
		aiov.iov_len = PAGE_SIZE;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = argv + len;
		xlen = PAGE_SIZE - ((argv + len) & PAGE_MASK);
		auio.uio_resid = xlen;
		auio.uio_rw = UIO_READ;
		UIO_SETUP_SYSSPACE(&auio);
		error = uvm_io(&vm->vm_map, &auio);
		if (error)
			goto bad;

		for (i = 0; i < xlen && count != 0; i++) {
			if (arg[i] == '\0')
				count--;	/* one full string */
		}

		if (len + i > uio->uio_offset) {
			/* Have data in this page, copy it out */
			error = uiomove(arg + uio->uio_offset - len,
			    i + len - uio->uio_offset, uio);
			if (error || uio->uio_resid <= 0)
				break;
		}
	}

bad:
	/*
	 * Release the process.
	 */
	uvmspace_free(vm);
	free(arg, M_TEMP);
	return (error);
}