Example #1
static int
auto_getattr(
	vnode_t *vp,
	vattr_t *vap,
	int flags,
	cred_t *cred,
	caller_context_t *ct)
{
	fnnode_t *fnp = vntofn(vp);
	vnode_t *newvp;
	vfs_t *vfsp;
	int error;

	AUTOFS_DPRINT((4, "auto_getattr vp %p\n", (void *)vp));

	if (flags & ATTR_TRIGGER) {
		/*
		 * Pre-trigger the mount
		 */
		error = auto_trigger_mount(vp, cred, &newvp);
		if (error)
			return (error);

		if (newvp == NULL)
			goto defattr;

		if ((error = vn_vfsrlock_wait(vp)) != 0) {
			VN_RELE(newvp);
			return (error);
		}

		vfsp = newvp->v_vfsp;
		VN_RELE(newvp);
	} else {
		/*
		 * Recursive auto_getattr/mount; go to the vfsp == NULL
		 * case.
		 */
		if (vn_vfswlock_held(vp))
			goto defattr;

		if ((error = vn_vfsrlock_wait(vp)) != 0)
			return (error);

		vfsp = vn_mountedvfs(vp);
	}

	if (vfsp != NULL) {
		/*
		 * Node is mounted on.
		 */
		error = VFS_ROOT(vfsp, &newvp);
		vn_vfsunlock(vp);
		if (error)
			return (error);
		mutex_enter(&fnp->fn_lock);
		if (fnp->fn_seen == newvp && fnp->fn_thread == curthread) {
			/*
			 * Recursive auto_getattr(); just release newvp and drop
			 * into the vfsp == NULL case.
			 */
			mutex_exit(&fnp->fn_lock);
			VN_RELE(newvp);
		} else {
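			/*
			 * Serialize with any other thread already doing a
			 * getattr on this node: wait until it finishes,
			 * then record ourselves as the active thread.
			 */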
			while (fnp->fn_thread && fnp->fn_thread != curthread) {
				fnp->fn_flags |= MF_ATTR_WAIT;
				cv_wait(&fnp->fn_cv_mount, &fnp->fn_lock);
			}
			fnp->fn_thread = curthread;
			fnp->fn_seen = newvp;
			mutex_exit(&fnp->fn_lock);
			error = VOP_GETATTR(newvp, vap, flags, cred, ct);
			VN_RELE(newvp);
			mutex_enter(&fnp->fn_lock);
			fnp->fn_seen = 0;
			fnp->fn_thread = 0;
			if (fnp->fn_flags & MF_ATTR_WAIT) {
				fnp->fn_flags &= ~MF_ATTR_WAIT;
				cv_broadcast(&fnp->fn_cv_mount);
			}
			mutex_exit(&fnp->fn_lock);
			return (error);
		}
	} else {
		vn_vfsunlock(vp);
	}

defattr:
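	/*
	 * Nothing is mounted here (or we are recursing); synthesize
	 * default attributes from the autofs node itself.
	 */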
	ASSERT(vp->v_type == VDIR || vp->v_type == VLNK);
	vap->va_uid	= 0;
	vap->va_gid	= 0;
	vap->va_nlink	= fnp->fn_linkcnt;
	vap->va_nodeid	= (u_longlong_t)fnp->fn_nodeid;
	vap->va_size	= fnp->fn_size;
	vap->va_atime	= fnp->fn_atime;
	vap->va_mtime	= fnp->fn_mtime;
	vap->va_ctime	= fnp->fn_ctime;
	vap->va_type	= vp->v_type;
	vap->va_mode	= fnp->fn_mode;
	vap->va_fsid	= vp->v_vfsp->vfs_dev;
	vap->va_rdev	= 0;
	vap->va_blksize	= MAXBSIZE;
	vap->va_nblocks	= (fsblkcnt64_t)btod(vap->va_size);
	vap->va_seq	= 0;

	return (0);
}
Example #2
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *ashift)
{
	vdev_file_t *vf;
	vnode_t *vp;
	vattr_t vattr;
	int error;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		vf = vd->vdev_tsd;
		goto skip_open;
	}

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_SLEEP);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone.  If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');
	error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE,
	    spa_mode(vd->vdev_spa) | FOFFMAX, 0, &vp, 0, 0, rootdir, -1);

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_vnode = vp;

#ifdef _KERNEL
	/*
	 * Make sure it's a regular file.
	 */
	if (vp->v_type != VREG) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (ENODEV);
	}
#endif

skip_open:
	/*
	 * Determine the physical size of the file.
	 */
	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vf->vf_vnode, &vattr, 0, kcred, NULL);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	*psize = vattr.va_size;
	*ashift = SPA_MINBLOCKSHIFT;

	return (0);
}
Example #3
/* ARGSUSED */
void
acl2_access(ACCESS2args *args, ACCESS2res *resp, struct exportinfo *exi,
    struct svc_req *req, cred_t *cr, bool_t ro)
{
	int error;
	vnode_t *vp;
	vattr_t va;
	int checkwriteperm;

	vp = nfs_fhtovp(&args->fh, exi);
	if (vp == NULL) {
		resp->status = NFSERR_STALE;
		return;
	}

	/*
	 * If the file system is exported read only, it is not appropriate
	 * to check write permissions for regular files and directories.
	 * Special files are interpreted by the client, so the underlying
	 * permissions are sent back to the client for interpretation.
	 */
	if (rdonly(ro, vp) && (vp->v_type == VREG || vp->v_type == VDIR))
		checkwriteperm = 0;
	else
		checkwriteperm = 1;

	/*
	 * We need the mode so that we can correctly determine access
	 * permissions relative to a mandatory lock file.  Access to
	 * mandatory lock files is denied on the server, so it might
	 * as well be reflected to the server during the open.
	 */
	va.va_mask = AT_MODE;
	error = VOP_GETATTR(vp, &va, 0, cr, NULL);
	if (error) {
		VN_RELE(vp);
		resp->status = puterrno(error);
		return;
	}

	resp->resok.access = 0;

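	/*
	 * Check each requested permission with VOP_ACCESS(); read,
	 * write and execute are additionally refused when the mode
	 * indicates mandatory locking.
	 */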
	if (args->access & ACCESS2_READ) {
		error = VOP_ACCESS(vp, VREAD, 0, cr, NULL);
		if (!error && !MANDLOCK(vp, va.va_mode))
			resp->resok.access |= ACCESS2_READ;
	}
	if ((args->access & ACCESS2_LOOKUP) && vp->v_type == VDIR) {
		error = VOP_ACCESS(vp, VEXEC, 0, cr, NULL);
		if (!error)
			resp->resok.access |= ACCESS2_LOOKUP;
	}
	if (checkwriteperm &&
	    (args->access & (ACCESS2_MODIFY|ACCESS2_EXTEND))) {
		error = VOP_ACCESS(vp, VWRITE, 0, cr, NULL);
		if (!error && !MANDLOCK(vp, va.va_mode))
			resp->resok.access |=
			    (args->access & (ACCESS2_MODIFY|ACCESS2_EXTEND));
	}
	if (checkwriteperm &&
	    (args->access & ACCESS2_DELETE) && (vp->v_type == VDIR)) {
		error = VOP_ACCESS(vp, VWRITE, 0, cr, NULL);
		if (!error)
			resp->resok.access |= ACCESS2_DELETE;
	}
	if (args->access & ACCESS2_EXECUTE) {
		error = VOP_ACCESS(vp, VEXEC, 0, cr, NULL);
		if (!error && !MANDLOCK(vp, va.va_mode))
			resp->resok.access |= ACCESS2_EXECUTE;
	}

	va.va_mask = AT_ALL;
	error = rfs4_delegated_getattr(vp, &va, 0, cr);

	VN_RELE(vp);

	/* check for overflowed values */
	if (!error) {
		error = vattr_to_nattr(&va, &resp->resok.attr);
	}
	if (error) {
		resp->status = puterrno(error);
		return;
	}

	resp->status = NFS_OK;
}
Example #4
static int
vdev_file_open(vdev_t *vd, uint64_t *psize, uint64_t *max_psize,
    uint64_t *ashift)
{
	vdev_file_t *vf;
	vnode_t *vp;
	vattr_t vattr;
	int error = 0;
	vnode_t *rootdir;

	/*
	 * We must have a pathname, and it must be absolute.
	 */
	if (vd->vdev_path == NULL || vd->vdev_path[0] != '/') {
		vd->vdev_stat.vs_aux = VDEV_AUX_BAD_LABEL;
		return (EINVAL);
	}

	/*
	 * Reopen the device if it's not currently open.  Otherwise,
	 * just update the physical size of the device.
	 */
	if (vd->vdev_tsd != NULL) {
		ASSERT(vd->vdev_reopening);
		vf = vd->vdev_tsd;
		vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
		goto skip_open;
	}

	vf = vd->vdev_tsd = kmem_zalloc(sizeof (vdev_file_t), KM_PUSHPAGE);

	/*
	 * We always open the files from the root of the global zone, even if
	 * we're in a local zone.  If the user has gotten to this point, the
	 * administrator has already decided that the pool should be available
	 * to local zone users, so the underlying devices should be as well.
	 */
	ASSERT(vd->vdev_path != NULL && vd->vdev_path[0] == '/');

	/*
	 * vn_openat(char *pnamep, enum uio_seg seg, int filemode,
	 *     int createmode, struct vnode **vpp, enum create crwhy,
	 *     mode_t umask, struct vnode *startvp)
	 */
	rootdir = getrootdir();

	error = vn_openat(vd->vdev_path + 1, UIO_SYSSPACE,
	    spa_mode(vd->vdev_spa) | FOFFMAX, 0, &vp, 0, 0, rootdir);

	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		return (error);
	}

	vf->vf_vnode = vp;
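	/* Remember the vnode id so a later reopen can detect recycling. */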
	vf->vf_vid = vnode_vid(vp);

#ifdef _KERNEL
	/*
	 * Make sure it's a regular file.
	 */
	if (!vnode_isreg(vp)) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vnode_put(vf->vf_vnode);
		return (ENODEV);
	}
#endif

skip_open:
	/*
	 * Determine the physical size of the file.
	 */
	vattr.va_mask = AT_SIZE;
	error = VOP_GETATTR(vf->vf_vnode, &vattr, 0, kcred, NULL);
	if (error) {
		vd->vdev_stat.vs_aux = VDEV_AUX_OPEN_FAILED;
		vnode_put(vf->vf_vnode);
		return (error);
	}

	*max_psize = *psize = vattr.va_size;
	*ashift = SPA_MINBLOCKSHIFT;
	vnode_put(vf->vf_vnode);

	return (0);
}
Example #5
int
puffs_biowrite(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred)
{
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	struct buf *bp;
	struct vattr vattr;
	off_t loffset, fsize;
	int boff, bytes;
	int error = 0;
	int bcount;
	int trivial;

	KKASSERT(uio->uio_rw == UIO_WRITE);
	KKASSERT(vp->v_type == VREG);

	if (uio->uio_offset < 0)
		return EINVAL;
	if (uio->uio_resid == 0)
		return 0;

	/*
	 * If IO_APPEND then load uio_offset.  We restart here if we cannot
	 * get the append lock.
	 *
	 * We need to obtain an exclusive lock if we intend to modify the
	 * file size in order to guarantee the append point with multiple
	 * contending writers.
	 */
	if (ioflag & IO_APPEND) {
		/* XXXDF relock if necessary */
		KKASSERT(vn_islocked(vp) == LK_EXCLUSIVE);
		error = VOP_GETATTR(vp, &vattr);
		if (error)
			return error;
		uio->uio_offset = puffs_meta_getsize(vp);
	}

	do {
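		/*
		 * Carve the request into logical buffers: boff is the
		 * offset within the buffer, loffset the buffer-aligned
		 * file offset, and bytes the amount written this pass.
		 */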
		boff = uio->uio_offset & (biosize-1);
		loffset = uio->uio_offset - boff;
		bytes = (int)szmin((unsigned)(biosize - boff), uio->uio_resid);
again:
		/*
		 * Handle direct append and file extension cases, calculate
		 * unaligned buffer size.  When extending B_CACHE will be
		 * set if possible.  See UIO_NOCOPY note below.
		 */
		fsize = puffs_meta_getsize(vp);
		if (uio->uio_offset + bytes > fsize) {
			trivial = (uio->uio_segflg != UIO_NOCOPY &&
			    uio->uio_offset <= fsize);
			puffs_meta_setsize(vp, uio->uio_offset + bytes,
			    trivial);
		}
		bp = getblk(vp, loffset, biosize, 0, 0);
		if (bp == NULL) {
			error = EINTR;
			break;
		}

		/*
		 * Actual bytes in buffer which we care about
		 */
		if (loffset + biosize < fsize)
			bcount = biosize;
		else
			bcount = (int)(fsize - loffset);

		/*
		 * Avoid a read by setting B_CACHE where the data we
		 * intend to write covers the entire buffer.  Note
		 * that the buffer may have been set to B_CACHE by
		 * puffs_meta_setsize() above or otherwise inherited the
		 * flag, but if B_CACHE isn't set the buffer may be
		 * uninitialized and must be zeroed to accommodate
		 * future seek+write's.
		 *
		 * See the comments in kern/vfs_bio.c's getblk() for
		 * more information.
		 *
		 * When doing a UIO_NOCOPY write the buffer is not
		 * overwritten and we cannot just set B_CACHE unconditionally
		 * for full-block writes.
		 */
		if (boff == 0 && bytes == biosize &&
		    uio->uio_segflg != UIO_NOCOPY) {
			bp->b_flags |= B_CACHE;
			bp->b_flags &= ~(B_ERROR | B_INVAL);
		}

		/*
		 * b_resid may be set due to file EOF if we extended out.
		 * The NFS bio code will zero the difference anyway so
		 * just acknowledge the fact and set b_resid to 0.
		 */
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_cmd = BUF_CMD_READ;
			bp->b_bio2.bio_done = puffs_iodone;
			bp->b_bio2.bio_flags |= BIO_SYNC;
			vfs_busy_pages(vp, bp);
			error = puffs_doio(vp, &bp->b_bio2, uio->uio_td);
			if (error) {
				brelse(bp);
				break;
			}
			bp->b_resid = 0;
		}

		/*
		 * If dirtyend exceeds file size, chop it down.  This should
		 * not normally occur, but there is an append race where it
		 * might (XXX), so we log it.
		 *
		 * If the chopping creates a reverse-indexed or degenerate
		 * situation with dirtyoff/end, we 0 both of them.
		 */
		if (bp->b_dirtyend > bcount) {
			kprintf("PUFFS append race @%08llx:%d\n",
			    (long long)bp->b_bio2.bio_offset,
			    bp->b_dirtyend - bcount);
			bp->b_dirtyend = bcount;
		}

		if (bp->b_dirtyoff >= bp->b_dirtyend)
			bp->b_dirtyoff = bp->b_dirtyend = 0;

		/*
		 * If the new write will leave a contiguous dirty
		 * area, just update the b_dirtyoff and b_dirtyend,
		 * otherwise force a write rpc of the old dirty area.
		 *
		 * While it is possible to merge discontiguous writes due to
		 * our having a B_CACHE buffer ( and thus valid read data
		 * for the hole), we don't because it could lead to
		 * significant cache coherency problems with multiple clients,
		 * especially if locking is implemented later on.
		 *
		 * As an optimization we could theoretically maintain
		 * a linked list of discontinuous areas, but we would still
		 * have to commit them separately so there isn't much
		 * advantage to it except perhaps a bit of asynchronization.
		 */
		if (bp->b_dirtyend > 0 &&
		    (boff > bp->b_dirtyend ||
		    (boff + bytes) < bp->b_dirtyoff)
		   ) {
			if (bwrite(bp) == EINTR) {
				error = EINTR;
				break;
			}
			goto again;
		}

		error = uiomove(bp->b_data + boff, bytes, uio);

		/*
		 * Since this block is being modified, it must be written
		 * again and not just committed.  Since write clustering does
		 * not work for the stage 1 data write, only the stage 2
		 * commit rpc, we have to clear B_CLUSTEROK as well.
		 */
		bp->b_flags &= ~(B_NEEDCOMMIT | B_CLUSTEROK);

		if (error) {
			brelse(bp);
			break;
		}

		/*
		 * Only update dirtyoff/dirtyend if not a degenerate
		 * condition.
		 *
		 * The underlying VM pages have been marked valid by
		 * virtue of acquiring the bp.  Because the entire buffer
		 * is marked dirty we do not have to worry about cleaning
		 * out the related dirty bits (and wouldn't really know
		 * how to deal with byte ranges anyway)
		 */
		if (bytes) {
			if (bp->b_dirtyend > 0) {
				bp->b_dirtyoff = imin(boff, bp->b_dirtyoff);
				bp->b_dirtyend = imax(boff + bytes,
				    bp->b_dirtyend);
			} else {
				bp->b_dirtyoff = boff;
				bp->b_dirtyend = boff + bytes;
			}
		}

		if (ioflag & IO_SYNC) {
			if (ioflag & IO_INVAL)
				bp->b_flags |= B_NOCACHE;
			error = bwrite(bp);
			if (error)
				break;
		} else {
			bdwrite(bp);
		}
	} while (uio->uio_resid > 0 && bytes > 0);

	return error;
}
Example #6
int
smbfs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct smbnode *np = VTOSMB(vp);
	struct vattr *vap = ap->a_vap;
	struct timespec *mtime, *atime;
	struct smb_cred scred;
	struct smb_share *ssp = np->n_mount->sm_share;
	struct smb_vc *vcp = SSTOVC(ssp);
	u_quad_t tsize = 0;
	int isreadonly, doclose, error = 0;

	SMBVDEBUG0("\n");
	if (vap->va_flags != VNOVAL)
		return EOPNOTSUPP;
	isreadonly = (vp->v_mount->mnt_flag & MNT_RDONLY);
	/*
	 * Disallow write attempts if the filesystem is mounted read-only.
	 */
	if ((vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL ||
	    vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_mode != (mode_t)VNOVAL) && isreadonly)
		return EROFS;
	smb_makescred(&scred, l, ap->a_cred);
	if (vap->va_size != VNOVAL) {
		switch (vp->v_type) {
		case VDIR:
			return EISDIR;
		case VREG:
			break;
		default:
			return EINVAL;
		}
		if (isreadonly)
			return EROFS;
		doclose = 0;
		tsize = np->n_size;
		np->n_size = vap->va_size;
		uvm_vnp_setsize(vp, vap->va_size);
		if ((np->n_flag & NOPEN) == 0) {
			error = smbfs_smb_open(np,
			    SMB_SM_DENYNONE|SMB_AM_OPENRW, &scred);
			if (error == 0)
				doclose = 1;
		}
		if (error == 0)
			error = smbfs_smb_setfsize(np, vap->va_size, &scred);
		if (doclose)
			smbfs_smb_close(ssp, np->n_fid, NULL, &scred);
		if (error) {
			np->n_size = tsize;
			uvm_vnp_setsize(vp, tsize);
			return (error);
		}
	}
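	/* Next, push any timestamp changes to the server. */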
	mtime = atime = NULL;
	if (vap->va_mtime.tv_sec != VNOVAL)
		mtime = &vap->va_mtime;
	if (vap->va_atime.tv_sec != VNOVAL)
		atime = &vap->va_atime;
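	/* mtime != atime is true iff at least one of them was set. */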
	if (mtime != atime) {
		error = kauth_authorize_vnode(ap->a_cred,
		    KAUTH_VNODE_WRITE_TIMES, ap->a_vp, NULL,
		    genfs_can_chtimes(ap->a_vp, vap->va_vaflags,
		    VTOSMBFS(vp)->sm_args.uid, ap->a_cred));
		if (error)
			return (error);

#if 0
		if (mtime == NULL)
			mtime = &np->n_mtime;
		if (atime == NULL)
			atime = &np->n_atime;
#endif
		/*
		 * If file is opened, then we can use handle based calls.
		 * If not, use path based ones.
		 */
		if ((np->n_flag & NOPEN) == 0) {
			if (vcp->vc_flags & SMBV_WIN95) {
				error = VOP_OPEN(vp, FWRITE, ap->a_cred);
				if (!error) {
#if 0
					error = smbfs_smb_setfattrNT(np, 0,
					    mtime, atime, &scred);
					VOP_GETATTR(vp, &vattr, ap->a_cred);
#endif
					if (mtime)
						np->n_mtime = *mtime;
					VOP_CLOSE(vp, FWRITE, ap->a_cred);
				}
			} else if (SMB_CAPS(vcp) & SMB_CAP_NT_SMBS) {
				error = smbfs_smb_setpattrNT(np, 0, mtime, atime, &scred);
			} else if (SMB_DIALECT(vcp) >= SMB_DIALECT_LANMAN2_0) {
				error = smbfs_smb_setptime2(np, mtime, atime, 0, &scred);
			} else {
				error = smbfs_smb_setpattr(np, 0, mtime, &scred);
			}
		} else {
			if (SMB_CAPS(vcp) & SMB_CAP_NT_SMBS) {
				error = smbfs_smb_setfattrNT(np, 0, mtime, atime, &scred);
			} else if (SMB_DIALECT(vcp) >= SMB_DIALECT_LANMAN1_0) {
				error = smbfs_smb_setftime(np, mtime, atime, &scred);
			} else {
				/*
				 * XXX I have no idea how to handle this for core
				 * level servers. The possible solution is to
				 * update mtime after file is closed.
				 */
			}
		}
	}
	/*
	 * Invalidate the attribute cache in case the server doesn't set
	 * the required attributes.
	 */
	smbfs_attr_cacheremove(vp);	/* invalidate cache */
	VOP_GETATTR(vp, vap, ap->a_cred);
	np->n_mtime.tv_sec = vap->va_mtime.tv_sec;
	VN_KNOTE(vp, NOTE_ATTRIB);
	return error;
}
Example #7
/* ARGSUSED */
static int
xattr_dir_getattr(vnode_t *vp, vattr_t *vap, int flags, cred_t *cr,
    caller_context_t *ct)
{
	timestruc_t now;
	vnode_t *pvp;
	int error;

	error = xattr_dir_realdir(vp, &pvp, LOOKUP_XATTR, cr, ct);
	if (error == 0) {
		error = VOP_GETATTR(pvp, vap, 0, cr, ct);
		if (error) {
			return (error);
		}
		vap->va_nlink += XATTRDIR_NENTS;
		vap->va_size += XATTRDIR_NENTS;
		return (0);
	}

	/*
	 * There is no real xattr directory.  Cobble together
	 * an entry using info from the parent object (if needed)
	 * plus information common to all xattrs.
	 */
	if (vap->va_mask & PARENT_ATTRMASK) {
		vattr_t pvattr;
		uint_t  off_bits;

		pvp = gfs_file_parent(vp);
		(void) memset(&pvattr, 0, sizeof (pvattr));
		pvattr.va_mask = PARENT_ATTRMASK;
		error = VOP_GETATTR(pvp, &pvattr, 0, cr, ct);
		if (error) {
			return (error);
		}

		/*
		 * VOP_GETATTR() might have turned off some bits in
		 * pvattr.va_mask.  This means that the underlying
		 * file system couldn't process those attributes.
		 * We need to make sure those bits get turned off
		 * in the vattr_t structure that gets passed back
		 * to the caller.  Figure out which bits were turned
		 * off (if any) then set pvattr.va_mask before it
		 * gets copied to the vattr_t that the caller sees.
		 */
		off_bits = (pvattr.va_mask ^ PARENT_ATTRMASK) & PARENT_ATTRMASK;
		pvattr.va_mask = vap->va_mask & ~off_bits;
		*vap = pvattr;
	}

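	/* Attributes common to every synthesized xattr directory. */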
	vap->va_type = VDIR;
	vap->va_mode = MAKEIMODE(vap->va_type, S_ISVTX | 0777);
	vap->va_fsid = vp->v_vfsp->vfs_dev;
	vap->va_nodeid = gfs_file_inode(vp);
	vap->va_nlink = XATTRDIR_NENTS+2;
	vap->va_size = vap->va_nlink;
	gethrestime(&now);
	vap->va_atime = now;
	vap->va_blksize = 0;
	vap->va_nblocks = 0;
	vap->va_seq = 0;
	return (0);
}
Example #8
/*
 * Set up nameidata for a lookup() call and do it.
 *
 * If pubflag is set, this call is done for a lookup operation on the
 * public filehandle. In that case we allow crossing mountpoints and
 * absolute pathnames. However, the caller is expected to check that
 * the lookup result is within the public fs, and deny access if
 * it is not.
 *
 * nfs_namei() clears out fields that namei() might leave as garbage.
 * This is mainly ni_vp and ni_dvp when an error occurs, and ni_dvp when no
 * error occurs but the parent was not requested.
 *
 * dirp may be set whether an error is returned or not, and must be
 * released by the caller.
 */
int
nfs_namei(struct nameidata *ndp, struct nfsrv_descript *nfsd,
    fhandle_t *fhp, int len, struct nfssvc_sock *slp,
    struct sockaddr *nam, struct mbuf **mdp,
    caddr_t *dposp, struct vnode **retdirp, int v3, struct vattr *retdirattrp,
    int *retdirattr_retp, int pubflag)
{
	int i, rem;
	struct mbuf *md;
	char *fromcp, *tocp, *cp;
	struct iovec aiov;
	struct uio auio;
	struct vnode *dp;
	int error, rdonly, linklen;
	struct componentname *cnp = &ndp->ni_cnd;
	int lockleaf = (cnp->cn_flags & LOCKLEAF) != 0;

	*retdirp = NULL;
	cnp->cn_flags |= NOMACCHECK;
	cnp->cn_pnbuf = uma_zalloc(namei_zone, M_WAITOK);

	/*
	 * Copy the name from the mbuf list to ndp->ni_pnbuf
	 * and set the various ndp fields appropriately.
	 */
	fromcp = *dposp;
	tocp = cnp->cn_pnbuf;
	md = *mdp;
	rem = mtod(md, caddr_t) + md->m_len - fromcp;
	for (i = 0; i < len; i++) {
		while (rem == 0) {
			md = md->m_next;
			if (md == NULL) {
				error = EBADRPC;
				goto out;
			}
			fromcp = mtod(md, caddr_t);
			rem = md->m_len;
		}
		if (*fromcp == '\0' || (!pubflag && *fromcp == '/')) {
			error = EACCES;
			goto out;
		}
		*tocp++ = *fromcp++;
		rem--;
	}
	*tocp = '\0';
	*mdp = md;
	*dposp = fromcp;
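	/* Skip the padding that rounds the name to a 4-byte XDR boundary. */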
	len = nfsm_rndup(len) - len;
	if (len > 0) {
		if (rem >= len)
			*dposp += len;
		else if ((error = nfs_adv(mdp, dposp, len, rem)) != 0)
			goto out;
	}

	if (!pubflag && nfs_ispublicfh(fhp)) {
		error = ESTALE;
		goto out;
	}

	/*
	 * Extract and set starting directory.
	 */
	error = nfsrv_fhtovp(fhp, 0, &dp, nfsd, slp, nam, &rdonly);
	if (error)
		goto out;
	if (dp->v_type != VDIR) {
		vput(dp);
		error = ENOTDIR;
		goto out;
	}

	if (rdonly)
		cnp->cn_flags |= RDONLY;

	/*
	 * Set return directory.  Reference to dp is implicitly transferred
	 * to the returned pointer.
	 */
	*retdirp = dp;
	if (v3) {
		*retdirattr_retp = VOP_GETATTR(dp, retdirattrp,
			ndp->ni_cnd.cn_cred);
	}

	VOP_UNLOCK(dp, 0);

	if (pubflag) {
		/*
		 * Oh joy. For WebNFS, handle those pesky '%' escapes,
		 * and the 'native path' indicator.
		 */
		cp = uma_zalloc(namei_zone, M_WAITOK);
		fromcp = cnp->cn_pnbuf;
		tocp = cp;
		if ((unsigned char)*fromcp >= WEBNFS_SPECCHAR_START) {
			switch ((unsigned char)*fromcp) {
			case WEBNFS_NATIVE_CHAR:
				/*
				 * 'Native' path for us is the same
				 * as a path according to the NFS spec,
				 * just skip the escape char.
				 */
				fromcp++;
				break;
			/*
			 * More may be added in the future, range 0x80-0xff
			 */
			default:
				error = EIO;
				uma_zfree(namei_zone, cp);
				goto out;
			}
		}
		/*
		 * Translate the '%' escapes, URL-style.
		 */
		while (*fromcp != '\0') {
			if (*fromcp == WEBNFS_ESC_CHAR) {
				if (fromcp[1] != '\0' && fromcp[2] != '\0') {
					fromcp++;
					*tocp++ = HEXSTRTOI(fromcp);
					fromcp += 2;
					continue;
				} else {
					error = ENOENT;
					uma_zfree(namei_zone, cp);
					goto out;
				}
			} else
				*tocp++ = *fromcp++;
		}
		*tocp = '\0';
		uma_zfree(namei_zone, cnp->cn_pnbuf);
		cnp->cn_pnbuf = cp;
	}

	ndp->ni_pathlen = (tocp - cnp->cn_pnbuf) + 1;
	ndp->ni_segflg = UIO_SYSSPACE;

	if (pubflag) {
		ndp->ni_rootdir = rootvnode;
		ndp->ni_loopcnt = 0;

		if (cnp->cn_pnbuf[0] == '/')
			dp = rootvnode;
	} else {
		cnp->cn_flags |= NOCROSSMOUNT;
	}

	/*
	 * Initialize for scan, set ni_startdir and bump ref on dp again
	 * because lookup() will dereference ni_startdir.
	 */

	cnp->cn_thread = curthread;
	VREF(dp);
	ndp->ni_startdir = dp;

	if (!lockleaf)
		cnp->cn_flags |= LOCKLEAF;
	for (;;) {
		cnp->cn_nameptr = cnp->cn_pnbuf;
		/*
		 * Call lookup() to do the real work.  If an error occurs,
		 * ndp->ni_vp and ni_dvp are left uninitialized or NULL and
		 * we do not have to dereference anything before returning.
		 * In either case ni_startdir will be dereferenced and NULLed
		 * out.
		 */
		error = lookup(ndp);
		if (error)
			break;

		/*
		 * Check for encountering a symbolic link.  Trivial
		 * termination occurs if no symlink encountered.
		 * Note: zfree is safe because error is 0, so we will
		 * not zfree it again when we break.
		 */
		if ((cnp->cn_flags & ISSYMLINK) == 0) {
			if (cnp->cn_flags & (SAVENAME | SAVESTART))
				cnp->cn_flags |= HASBUF;
			else
				uma_zfree(namei_zone, cnp->cn_pnbuf);
			if (ndp->ni_vp && !lockleaf)
				VOP_UNLOCK(ndp->ni_vp, 0);
			break;
		}

		/*
		 * Validate symlink
		 */
		if ((cnp->cn_flags & LOCKPARENT) && ndp->ni_pathlen == 1)
			VOP_UNLOCK(ndp->ni_dvp, 0);
		if (!pubflag) {
			error = EINVAL;
			goto badlink2;
		}

		if (ndp->ni_loopcnt++ >= MAXSYMLINKS) {
			error = ELOOP;
			goto badlink2;
		}
		if (ndp->ni_pathlen > 1)
			cp = uma_zalloc(namei_zone, M_WAITOK);
		else
			cp = cnp->cn_pnbuf;
		aiov.iov_base = cp;
		aiov.iov_len = MAXPATHLEN;
		auio.uio_iov = &aiov;
		auio.uio_iovcnt = 1;
		auio.uio_offset = 0;
		auio.uio_rw = UIO_READ;
		auio.uio_segflg = UIO_SYSSPACE;
		auio.uio_td = NULL;
		auio.uio_resid = MAXPATHLEN;
		error = VOP_READLINK(ndp->ni_vp, &auio, cnp->cn_cred);
		if (error) {
		badlink1:
			if (ndp->ni_pathlen > 1)
				uma_zfree(namei_zone, cp);
		badlink2:
			vput(ndp->ni_vp);
			vrele(ndp->ni_dvp);
			break;
		}
		linklen = MAXPATHLEN - auio.uio_resid;
		if (linklen == 0) {
			error = ENOENT;
			goto badlink1;
		}
		if (linklen + ndp->ni_pathlen >= MAXPATHLEN) {
			error = ENAMETOOLONG;
			goto badlink1;
		}

		/*
		 * Adjust or replace path
		 */
		if (ndp->ni_pathlen > 1) {
			bcopy(ndp->ni_next, cp + linklen, ndp->ni_pathlen);
			uma_zfree(namei_zone, cnp->cn_pnbuf);
			cnp->cn_pnbuf = cp;
		} else
			cnp->cn_pnbuf[linklen] = '\0';
		ndp->ni_pathlen += linklen;

		/*
		 * Cleanup refs for next loop and check if root directory
		 * should replace current directory.  Normally ni_dvp
		 * becomes the new base directory and is cleaned up when
		 * we loop.  Explicitly null pointers after invalidation
		 * to clarify operation.
		 */
		vput(ndp->ni_vp);
		ndp->ni_vp = NULL;

		if (cnp->cn_pnbuf[0] == '/') {
			vrele(ndp->ni_dvp);
			ndp->ni_dvp = ndp->ni_rootdir;
			VREF(ndp->ni_dvp);
		}
		ndp->ni_startdir = ndp->ni_dvp;
		ndp->ni_dvp = NULL;
	}
	if (!lockleaf)
		cnp->cn_flags &= ~LOCKLEAF;

	/*
	 * nfs_namei() guarantees that fields will not contain garbage
	 * whether an error occurs or not.  This allows the caller to track
	 * cleanup state trivially.
	 */
out:
	if (error) {
		uma_zfree(namei_zone, cnp->cn_pnbuf);
		ndp->ni_vp = NULL;
		ndp->ni_dvp = NULL;
		ndp->ni_startdir = NULL;
		cnp->cn_flags &= ~HASBUF;
	} else if ((ndp->ni_cnd.cn_flags & (WANTPARENT|LOCKPARENT)) == 0) {
		ndp->ni_dvp = NULL;
	}
	return (error);
}
Example #9
/* Find parent vnode of *lvpp, return in *uvpp */
int
vfs_getcwd_scandir(struct vnode **lvpp, struct vnode **uvpp, char **bpp,
                   char *bufp, struct proc *p)
{
    int eofflag, tries, dirbuflen, len, reclen, error = 0;
    off_t off;
    struct uio uio;
    struct iovec iov;
    char *dirbuf = NULL;
    ino_t fileno;
    struct vattr va;
    struct vnode *uvp = NULL;
    struct vnode *lvp = *lvpp;
    struct componentname cn;

    tries = 0;

    /*
     * If we want the filename, get some info we need while the
     * current directory is still locked.
     */
    if (bufp != NULL) {
        error = VOP_GETATTR(lvp, &va, p->p_ucred, p);
        if (error) {
            vput(lvp);
            *lvpp = NULL;
            *uvpp = NULL;
            return (error);
        }
    }

    cn.cn_nameiop = LOOKUP;
    cn.cn_flags = ISLASTCN | ISDOTDOT | RDONLY;
    cn.cn_proc = p;
    cn.cn_cred = p->p_ucred;
    cn.cn_pnbuf = NULL;
    cn.cn_nameptr = "..";
    cn.cn_namelen = 2;
    cn.cn_consume = 0;

    /* Get parent vnode using lookup of '..' */
    error = VOP_LOOKUP(lvp, uvpp, &cn);
    if (error) {
        vput(lvp);
        *lvpp = NULL;
        *uvpp = NULL;
        return (error);
    }

    uvp = *uvpp;

    /* If we don't care about the pathname, we're done */
    if (bufp == NULL) {
        vrele(lvp);
        *lvpp = NULL;
        return (0);
    }

    fileno = va.va_fileid;

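    /*
     * Use a scratch buffer of at least one directory block so that
     * VOP_READDIR can return whole entries.
     */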
    dirbuflen = DIRBLKSIZ;

    if (dirbuflen < va.va_blocksize)
        dirbuflen = va.va_blocksize;

    dirbuf = malloc(dirbuflen, M_TEMP, M_WAITOK);

    off = 0;

    do {
        char   *cpos;
        struct dirent *dp;

        iov.iov_base = dirbuf;
        iov.iov_len = dirbuflen;

        uio.uio_iov = &iov;
        uio.uio_iovcnt = 1;
        uio.uio_offset = off;
        uio.uio_resid = dirbuflen;
        uio.uio_segflg = UIO_SYSSPACE;
        uio.uio_rw = UIO_READ;
        uio.uio_procp = p;

        eofflag = 0;

        /* Call VOP_READDIR of parent */
        error = VOP_READDIR(uvp, &uio, p->p_ucred, &eofflag);

        off = uio.uio_offset;

        /* Try again if NFS tosses its cookies */
        if (error == EINVAL && tries < 3) {
            tries++;
            off = 0;
            continue;
        } else if (error) {
            goto out; /* Old userland getcwd() behaviour */
        }

        cpos = dirbuf;
        tries = 0;

        /* Scan directory page looking for matching vnode */
        for (len = (dirbuflen - uio.uio_resid); len > 0;
                len -= reclen) {
            dp = (struct dirent *)cpos;
            reclen = dp->d_reclen;

            /* Check for malformed directory */
            if (reclen < DIRENT_RECSIZE(1)) {
                error = EINVAL;
                goto out;
            }

            if (dp->d_fileno == fileno) {
                char *bp = *bpp;
                bp -= dp->d_namlen;

                if (bp <= bufp) {
                    error = ERANGE;
                    goto out;
                }

                memmove(bp, dp->d_name, dp->d_namlen);
                error = 0;
                *bpp = bp;

                goto out;
            }

            cpos += reclen;
        }

    } while (!eofflag);

    error = ENOENT;

out:

    vrele(lvp);
    *lvpp = NULL;

    free(dirbuf, M_TEMP);

    return (error);
}
Example #10
/*
 * Most ioctl command are just converted to their NetBSD values,
 * and passed on. The ones that take structure pointers and (flag)
 * values need some massaging.
 */
int
linux_sys_ioctl(struct lwp *l, const struct linux_sys_ioctl_args *uap, register_t *retval)
{
	/* {
		syscallarg(int) fd;
		syscallarg(u_long) com;
		syscallarg(void *) data;
	} */
	int error;

	switch (LINUX_IOCGROUP(SCARG(uap, com))) {
	case 'M':
		switch(SCARG(uap, com)) {
		case LINUX_MEGARAID_CMD:
		case LINUX_MEGARAID_GET_AEN:
		{
			struct sys_ioctl_args ua;
			u_long com = 0;
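			/*
			 * Linux encodes the ioctl direction bits the
			 * opposite way around from NetBSD, so swap
			 * IOC_IN and IOC_OUT before passing it on.
			 */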
			if (SCARG(uap, com) & IOC_IN)
				com |= IOC_OUT;
			if (SCARG(uap, com) & IOC_OUT)
				com |= IOC_IN;
			SCARG(&ua, fd) = SCARG(uap, fd);
			SCARG(&ua, com) = SCARG(uap, com);
			SCARG(&ua, com) &= ~IOC_DIRMASK;
			SCARG(&ua, com) |= com;
			SCARG(&ua, data) = SCARG(uap, data);
			error = sys_ioctl(l, (const void *)&ua, retval);
			break;
		}
		default:
			error = oss_ioctl_mixer(l, LINUX_TO_OSS(uap), retval);
			break;
		}
		break;
	case 'Q':
		error = oss_ioctl_sequencer(l, LINUX_TO_OSS(uap), retval);
		break;
	case 'P':
		error = oss_ioctl_audio(l, LINUX_TO_OSS(uap), retval);
		break;
	case 'V':	/* video4linux2 */
	case 'd':	/* drm */
	{
		struct sys_ioctl_args ua;
		u_long com = 0;
		if (SCARG(uap, com) & IOC_IN)
			com |= IOC_OUT;
		if (SCARG(uap, com) & IOC_OUT)
			com |= IOC_IN;
		SCARG(&ua, fd) = SCARG(uap, fd);
		SCARG(&ua, com) = SCARG(uap, com);
		SCARG(&ua, com) &= ~IOC_DIRMASK;
		SCARG(&ua, com) |= com;
		SCARG(&ua, data) = SCARG(uap, data);
		error = sys_ioctl(l, (const void *)&ua, retval);
		break;
	}
	case 'r': /* VFAT ioctls; not yet supported */
		error = ENOSYS;
		break;
	case 'S':
		error = linux_ioctl_cdrom(l, uap, retval);
		break;
	case 't':
	case 'f':
		error = linux_ioctl_termios(l, uap, retval);
		break;
	case 'm':
		error = linux_ioctl_mtio(l, uap, retval);
		break;
	case 'T':
	{
#if NSEQUENCER > 0
/* XXX XAX 2x check this. */
		/*
		 * Both termios and the MIDI sequencer use 'T' to identify
		 * the ioctl, so we have to differentiate them in another
		 * way.  We do it by indexing in the cdevsw with the major
		 * device number and check if that is the sequencer entry.
		 */
		bool is_sequencer = false;
		struct file *fp;
		struct vnode *vp;
		struct vattr va;
		extern const struct cdevsw sequencer_cdevsw;

		if ((fp = fd_getfile(SCARG(uap, fd))) == NULL)
			return EBADF;
		if (fp->f_type == DTYPE_VNODE &&
		    (vp = (struct vnode *)fp->f_data) != NULL &&
		    vp->v_type == VCHR) {
			vn_lock(vp, LK_SHARED | LK_RETRY);
			error = VOP_GETATTR(vp, &va, l->l_cred);
			VOP_UNLOCK(vp);
			if (error == 0 &&
			    cdevsw_lookup(va.va_rdev) == &sequencer_cdevsw)
				is_sequencer = true;
		}
		if (is_sequencer) {
			error = oss_ioctl_sequencer(l, (const void *)LINUX_TO_OSS(uap),
						   retval);
		}
		else {
			error = linux_ioctl_termios(l, uap, retval);
		}
		fd_putfile(SCARG(uap, fd));
#else
		error = linux_ioctl_termios(l, uap, retval);
#endif
	}
		break;
	case '"':
		error = linux_ioctl_sg(l, uap, retval);
		break;
	case 0x89:
		error = linux_ioctl_socket(l, uap, retval);
		break;
	case 0x03:
		error = linux_ioctl_hdio(l, uap, retval);
		break;
	case 0x02:
		error = linux_ioctl_fdio(l, uap, retval);
		break;
	case 0x12:
		error = linux_ioctl_blkio(l, uap, retval);
		break;
	default:
		error = linux_machdepioctl(l, uap, retval);
		break;
	}
	if (error == EPASSTHROUGH) {
		/*
		 * Linux returns EINVAL or ENOTTY for unsupported ioctls.
		 */
		error = EINVAL;
	}

	return error;
}
Example #11
int
xfs_mount_common_sys(struct mount *mp,
		     const char *path,
		     void *data,
		     struct nameidata *ndp,
		     d_thread_t *p)
{
    struct vnode *devvp;
    dev_t dev;
    int error;
    struct vattr vat;

    NNPFSDEB(XDEBVFOPS, ("xfs_mount: "
		       "struct mount mp = %lx path = '%s' data = '%s'\n",
		       (unsigned long)mp, path, (char *)data));

#ifdef ARLA_KNFS
    NNPFSDEB(XDEBVFOPS, ("xfs_mount: mount flags = %x\n", mp->mnt_flag));

    /*
     * mountd(8) flushes all export entries when it starts.
     * Right now we ignore this (but should not).
     */

    if (mp->mnt_flag & MNT_UPDATE ||
	mp->mnt_flag & MNT_DELEXPORT) {

	NNPFSDEB(XDEBVFOPS,
	       ("xfs_mount: ignoring MNT_UPDATE or MNT_DELEXPORT\n"));
	return 0;
    }
#endif

    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF, UIO_SYSSPACE, data, p);
    error = namei(ndp);
    if (error) {
	NNPFSDEB(XDEBVFOPS, ("namei failed, errno = %d\n", error));
	return error;
    }

    devvp = ndp->ni_vp;

    if (devvp->v_type != VCHR) {
	vput(devvp);
	NNPFSDEB(XDEBVFOPS, ("not VCHR (%d)\n", devvp->v_type));
	return ENXIO;
    }
#if defined(__osf__)
    VOP_GETATTR(devvp, &vat, ndp->ni_cred, error);
#elif defined(HAVE_FREEBSD_THREAD)
    error = VOP_GETATTR(devvp, &vat, p->td_proc->p_ucred, p);
#else
    error = VOP_GETATTR(devvp, &vat, p->p_ucred, p);
#endif
    vput(devvp);
    if (error) {
	NNPFSDEB(XDEBVFOPS, ("VOP_GETATTR failed, error = %d\n", error));
	return error;
    }

    dev = VA_RDEV_TO_DEV(vat.va_rdev);

    NNPFSDEB(XDEBVFOPS, ("dev = %d.%d\n", major(dev), minor(dev)));

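    /* The backing device must be one of our own xfs control devices. */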
    if (!xfs_is_xfs_dev (dev)) {
	NNPFSDEB(XDEBVFOPS, ("%s is not an xfs device\n", (char *)data));
	return ENXIO;
    }

    if (xfs[minor(dev)].status & NNPFS_MOUNTED)
	return EBUSY;

    xfs[minor(dev)].status = NNPFS_MOUNTED;
    xfs[minor(dev)].mp = mp;
    xfs[minor(dev)].root = 0;
    xfs[minor(dev)].nnodes = 0;
    xfs[minor(dev)].fd = minor(dev);

    nnfs_init_head(&xfs[minor(dev)].nodehead);

    VFS_TO_NNPFS(mp) = &xfs[minor(dev)];
#if defined(HAVE_KERNEL_VFS_GETNEWFSID)
#if defined(HAVE_TWO_ARGUMENT_VFS_GETNEWFSID)
    vfs_getnewfsid(mp, MOUNT_AFS);
#else
    vfs_getnewfsid(mp);
#endif /* HAVE_TWO_ARGUMENT_VFS_GETNEWFSID */
#endif /* HAVE_KERNEL_VFS_GETNEWFSID */

    mp->mnt_stat.f_bsize = DEV_BSIZE;
#ifndef __osf__
    mp->mnt_stat.f_iosize = DEV_BSIZE;
    mp->mnt_stat.f_owner = 0;
#endif
    mp->mnt_stat.f_blocks = 4711 * 4711;
    mp->mnt_stat.f_bfree = 4711 * 4711;
    mp->mnt_stat.f_bavail = 4711 * 4711;
    mp->mnt_stat.f_files = 4711;
    mp->mnt_stat.f_ffree = 4711;
    mp->mnt_stat.f_flags = mp->mnt_flag;

#ifdef __osf__
    mp->mnt_stat.f_fsid.val[0] = dev;
    mp->mnt_stat.f_fsid.val[1] = MOUNT_NNPFS;
	
    MALLOC(mp->m_stat.f_mntonname, char *, strlen(path) + 1, 
	   M_PATHNAME, M_WAITOK);
    strcpy(mp->m_stat.f_mntonname, path);

    MALLOC(mp->m_stat.f_mntfromname, char *, sizeof("arla"),
	   M_PATHNAME, M_WAITOK);
    strcpy(mp->m_stat.f_mntfromname, "arla");
#else /* __osf__ */
    strncpy(mp->mnt_stat.f_mntonname,
	    path,
	    sizeof(mp->mnt_stat.f_mntonname));

    strncpy(mp->mnt_stat.f_mntfromname,
	    data,
	    sizeof(mp->mnt_stat.f_mntfromname));

    strncpy(mp->mnt_stat.f_fstypename,
	    "xfs",
	    sizeof(mp->mnt_stat.f_fstypename));
#endif /* __osf__ */

    return 0;
}
Example #12
static int
unionfs_lookup(void *v)
{
	struct vop_lookup_args *ap = v;
	int		iswhiteout;
	int		lockflag;
	int		error , uerror, lerror;
	u_long		nameiop;
	u_long		cnflags, cnflagsbk;
	struct unionfs_node *dunp;
	struct vnode   *dvp, *udvp, *ldvp, *vp, *uvp, *lvp, *dtmpvp;
	struct vattr	va;
	struct componentname *cnp;

	iswhiteout = 0;
	lockflag = 0;
	error = uerror = lerror = ENOENT;
	cnp = ap->a_cnp;
	nameiop = cnp->cn_nameiop;
	cnflags = cnp->cn_flags;
	dvp = ap->a_dvp;
	dunp = VTOUNIONFS(dvp);
	udvp = dunp->un_uppervp;
	ldvp = dunp->un_lowervp;
	vp = uvp = lvp = NULLVP;
	*(ap->a_vpp) = NULLVP;

	UNIONFS_INTERNAL_DEBUG("unionfs_lookup: enter: nameiop=%ld, flags=%lx, path=%s\n", nameiop, cnflags, cnp->cn_nameptr);

	if (dvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * If the mount is read-only and the op is not LOOKUP, return EROFS.
	 */
	if ((cnflags & ISLASTCN) &&
	    (dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    LOOKUP != nameiop)
		return (EROFS);

	/*
	 * lookup dotdot
	 */
	if (cnflags & ISDOTDOT) {
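		/*
		 * Look ".." up in whichever real directory exists,
		 * preferring the upper layer, then map the result back
		 * to the unionfs parent vnode.
		 */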
		if (LOOKUP != nameiop && udvp == NULLVP)
			return (EROFS);

		if (udvp != NULLVP) {
			dtmpvp = udvp;
			if (ldvp != NULLVP)
				VOP_UNLOCK(ldvp);
		}
		else
			dtmpvp = ldvp;

		error = VOP_LOOKUP(dtmpvp, &vp, cnp);

		if (dtmpvp == udvp && ldvp != NULLVP) {
			VOP_UNLOCK(udvp);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		}

		if (error == 0) {
			/*
			 * Exchange lock and reference from vp to
			 * dunp->un_dvp. vp is upper/lower vnode, but it
			 * will need to return the unionfs vnode.
			 */
			if (nameiop == DELETE || nameiop == RENAME)
				VOP_UNLOCK(vp);
			vrele(vp);

			VOP_UNLOCK(dvp);
			*(ap->a_vpp) = dunp->un_dvp;
			vref(dunp->un_dvp);

			vn_lock(dunp->un_dvp, LK_EXCLUSIVE | LK_RETRY);
			vn_lock(dvp, LK_EXCLUSIVE | LK_RETRY);
		} else if (error == ENOENT && nameiop != CREATE)
			cache_enter(dvp, NULLVP, cnp->cn_nameptr,
				    cnp->cn_namelen, cnp->cn_flags);

		UNIONFS_INTERNAL_DEBUG("unionfs_lookup: leave (%d)\n", error);

		return (error);
	}

	/*
	 * lookup upper layer
	 */
	if (udvp != NULLVP) {
		uerror = VOP_LOOKUP(udvp, &uvp, cnp);

		if (uerror == 0) {
			if (udvp == uvp) {	/* is dot */
				vrele(uvp);
				*(ap->a_vpp) = dvp;
				vref(dvp);

				UNIONFS_INTERNAL_DEBUG("unionfs_lookup: leave (%d)\n", uerror);

				return (uerror);
			}
		}

		/* check whiteout */
		if (uerror == ENOENT || uerror == EJUSTRETURN)
			if (cnp->cn_flags & ISWHITEOUT)
				iswhiteout = 1;	/* don't lookup lower */
		if (iswhiteout == 0 && ldvp != NULLVP)
			if (VOP_GETATTR(udvp, &va, cnp->cn_cred) == 0 &&
			    (va.va_flags & OPAQUE))
				iswhiteout = 1;	/* don't lookup lower */
#if 0
		UNIONFS_INTERNAL_DEBUG("unionfs_lookup: debug: whiteout=%d, path=%s\n", iswhiteout, cnp->cn_nameptr);
#endif
	}

	/*
	 * lookup lower layer
	 */
	if (ldvp != NULLVP && !(cnflags & DOWHITEOUT) && iswhiteout == 0) {
		/* always op is LOOKUP */
		cnp->cn_nameiop = LOOKUP;
		cnflagsbk = cnp->cn_flags;
		cnp->cn_flags = cnflags;

		lerror = VOP_LOOKUP(ldvp, &lvp, cnp);

		cnp->cn_nameiop = nameiop;
		if (udvp != NULLVP && (uerror == 0 || uerror == EJUSTRETURN))
			cnp->cn_flags = cnflagsbk;

		if (lerror == 0) {
			if (ldvp == lvp) {	/* is dot */
				if (uvp != NULLVP)
					vrele(uvp);	/* no need? */
				vrele(lvp);
				*(ap->a_vpp) = dvp;
				vref(dvp);

				UNIONFS_INTERNAL_DEBUG("unionfs_lookup: leave (%d)\n", lerror);
				if (uvp != NULLVP)
					VOP_UNLOCK(uvp);
				return (lerror);
			}
		}
	}

	/*
	 * check lookup result
	 */
	if (uvp == NULLVP && lvp == NULLVP) {
		UNIONFS_INTERNAL_DEBUG("unionfs_lookup: leave (%d)\n",
		    (udvp != NULLVP ? uerror : lerror));
		return (udvp != NULLVP ? uerror : lerror);
	}

	/*
	 * check vnode type
	 */
	if (uvp != NULLVP && lvp != NULLVP && uvp->v_type != lvp->v_type) {
		vput(lvp);
		lvp = NULLVP;
	}

	/*
	 * check shadow dir
	 */
	if (uerror != 0 && uerror != EJUSTRETURN && udvp != NULLVP &&
	    lerror == 0 && lvp != NULLVP && lvp->v_type == VDIR &&
	    !(dvp->v_mount->mnt_flag & MNT_RDONLY) &&
	    (1 < cnp->cn_namelen || '.' != *(cnp->cn_nameptr))) {
		/* get unionfs vnode in order to create a new shadow dir. */
		error = unionfs_nodeget(dvp->v_mount, NULLVP, lvp, dvp, &vp,
		    cnp);
		if (error != 0)
			goto unionfs_lookup_out;
		error = unionfs_mkshadowdir(MOUNTTOUNIONFSMOUNT(dvp->v_mount),
		    udvp, VTOUNIONFS(vp), cnp);
		if (error != 0) {
			UNIONFSDEBUG("unionfs_lookup: Unable to create shadow dir.");
			vput(vp);
			goto unionfs_lookup_out;
		}
	}
	/*
	 * get unionfs vnode.
	 */
	else {
		if (uvp != NULLVP)
			error = uerror;
		else
			error = lerror;
		if (error != 0)
			goto unionfs_lookup_out;
		error = unionfs_nodeget(dvp->v_mount, uvp, lvp, dvp, &vp, cnp);
		if (error != 0) {
			UNIONFSDEBUG("unionfs_lookup: Unable to create unionfs vnode.");
			goto unionfs_lookup_out;
		}
	}

	*(ap->a_vpp) = vp;

	cache_enter(dvp, vp, cnp->cn_nameptr, cnp->cn_namelen,
		    cnp->cn_flags);

	/* XXXAD lock status on error */
unionfs_lookup_out:
	if (uvp != NULLVP)
		vrele(uvp);
	if (lvp != NULLVP)
		vrele(lvp);

	if (error == ENOENT && nameiop != CREATE)
		cache_enter(dvp, NULLVP, cnp->cn_nameptr, cnp->cn_namelen,
			    cnp->cn_flags);

	UNIONFS_INTERNAL_DEBUG("unionfs_lookup: leave (%d)\n", error);

	return (error);
}
Example #13
static int
unionfs_readdir(void *v)
{
	struct vop_readdir_args *ap = v;
	int		error;
	int		eofflag;
	int		locked;
	struct unionfs_node *unp;
	struct unionfs_node_status *unsp;
	struct uio     *uio;
	struct vnode   *uvp;
	struct vnode   *lvp;
	struct vattr    va;

	int		ncookies_bk;
	off_t          *cookies_bk;

	UNIONFS_INTERNAL_DEBUG("unionfs_readdir: enter\n");

	error = 0;
	eofflag = 0;
	locked = 0;
	unp = VTOUNIONFS(ap->a_vp);
	uio = ap->a_uio;
	uvp = unp->un_uppervp;
	lvp = unp->un_lowervp;
	ncookies_bk = 0;
	cookies_bk = NULL;

	if (ap->a_vp->v_type != VDIR)
		return (ENOTDIR);

	/* check opaque */
	if (uvp != NULLVP && lvp != NULLVP) {
		if ((error = VOP_GETATTR(uvp, &va, ap->a_cred)) != 0)
			goto unionfs_readdir_exit;
		if (va.va_flags & OPAQUE)
			lvp = NULLVP;
	}

	/* check the open count. unionfs needs to open before readdir. */
	VOP_UNLOCK(ap->a_vp);
	vn_lock(ap->a_vp, LK_EXCLUSIVE | LK_RETRY);
	unionfs_get_node_status(unp, &unsp);
	if ((uvp != NULLVP && unsp->uns_upper_opencnt <= 0) ||
	    (lvp != NULLVP && unsp->uns_lower_opencnt <= 0)) {
		unionfs_tryrem_node_status(unp, unsp);
		error = EBADF;
	}
	if (error != 0)
		goto unionfs_readdir_exit;

	/* upper only */
	if (uvp != NULLVP && lvp == NULLVP) {
		error = VOP_READDIR(uvp, uio, ap->a_cred, ap->a_eofflag,
		    ap->a_cookies, ap->a_ncookies);
		unsp->uns_readdir_status = 0;

		goto unionfs_readdir_exit;
	}

	/* lower only */
	if (uvp == NULLVP && lvp != NULLVP) {
		error = VOP_READDIR(lvp, uio, ap->a_cred, ap->a_eofflag,
		    ap->a_cookies, ap->a_ncookies);
		unsp->uns_readdir_status = 2;

		goto unionfs_readdir_exit;
	}

	/*
	 * readdir upper and lower
	 */
	KASSERT(uvp != NULLVP);
	KASSERT(lvp != NULLVP);
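	/*
	 * uns_readdir_status tracks progress: 0 = reading the upper
	 * directory, 1 = upper exhausted, 2 = reading the lower one.
	 */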
	if (uio->uio_offset == 0)
		unsp->uns_readdir_status = 0;

	if (unsp->uns_readdir_status == 0) {
		/* read upper */
		error = VOP_READDIR(uvp, uio, ap->a_cred, &eofflag,
				    ap->a_cookies, ap->a_ncookies);

		if (error != 0 || eofflag == 0)
			goto unionfs_readdir_exit;
		unsp->uns_readdir_status = 1;

		/*
		 * ufs (and other file systems) need uio_resid to be
		 * larger than DIRBLKSIZ, which equals DEV_BSIZE.
		 * (see ufs_readdir() in ufs/ufs/ufs_vnops.c and
		 * ufs/ufs/dir.h)
		 */
		if (uio->uio_resid <= (uio->uio_resid & (DEV_BSIZE - 1)))
			goto unionfs_readdir_exit;

		/*
		 * backup cookies
		 * It prepares to readdir in lower.
		 */
		if (ap->a_ncookies != NULL) {
			ncookies_bk = *(ap->a_ncookies);
			*(ap->a_ncookies) = 0;
		}
		if (ap->a_cookies != NULL) {
			cookies_bk = *(ap->a_cookies);
			*(ap->a_cookies) = NULL;
		}
	}

	/* initialize for readdir in lower */
	if (unsp->uns_readdir_status == 1) {
		unsp->uns_readdir_status = 2;
		uio->uio_offset = 0;
	}

	if (lvp == NULLVP) {
		error = EBADF;
		goto unionfs_readdir_exit;
	}
	/* read lower */
	error = VOP_READDIR(lvp, uio, ap->a_cred, ap->a_eofflag,
			    ap->a_cookies, ap->a_ncookies);

	if (cookies_bk != NULL) {
		/* merge cookies */
		int		size;
		off_t         *newcookies, *pos;

		size = *(ap->a_ncookies) + ncookies_bk;
		newcookies = (off_t *) malloc(size * sizeof(off_t),
		    M_TEMP, M_WAITOK);
		pos = newcookies;

		memcpy(pos, cookies_bk, ncookies_bk * sizeof(off_t));
		pos += ncookies_bk;	/* element, not byte, arithmetic */
		memcpy(pos, *(ap->a_cookies), *(ap->a_ncookies) * sizeof(off_t));
		free(cookies_bk, M_TEMP);
		free(*(ap->a_cookies), M_TEMP);
		*(ap->a_ncookies) = size;
		*(ap->a_cookies) = newcookies;
	}

unionfs_readdir_exit:
	if (error != 0 && ap->a_eofflag != NULL)
		*(ap->a_eofflag) = 1;

	UNIONFS_INTERNAL_DEBUG("unionfs_readdir: leave (%d)\n", error);

	return (error);
}
Example #14
int
secadm_vnode_check_exec(struct ucred *ucred, struct vnode *vp,
    struct label *vplabel, struct image_params *imgp,
    struct label *execlabel)
{
	struct rm_priotracker tracker;
	struct secadm_prison_entry *entry;
	secadm_rule_t *rule;
	struct vattr vap;
	size_t i;
	int err = 0, flags = 0;

	entry = get_prison_list_entry(ucred->cr_prison->pr_name, 0);
	if (entry == NULL)
		return (0);

	err = VOP_GETATTR(imgp->vp, &vap, ucred);
	if (err)
		return (err);

	SPL_RLOCK(entry, tracker);
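	/*
	 * Find a rule matching this image by inode number and mount
	 * point, then apply its features.
	 */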
	for (rule = entry->spl_rules; rule != NULL; rule = rule->sr_next) {
		if (vap.va_fileid != rule->sr_inode)
			continue;

		if (strcmp(imgp->vp->v_mount->mnt_stat.f_mntonname,
		    rule->sr_mount))
			continue;

		for (i = 0; i < rule->sr_nfeatures; i++) {
			switch (rule->sr_features[i].sf_type) {
			case pageexec_enabled:
				flags |= PAX_NOTE_PAGEEXEC;
				break;
			case pageexec_disabled:
				flags |= PAX_NOTE_NOPAGEEXEC;
				break;
			case mprotect_enabled:
				flags |= PAX_NOTE_MPROTECT;
				break;
			case mprotect_disabled:
				flags |= PAX_NOTE_NOMPROTECT;
				break;
			case segvguard_enabled:
				flags |= PAX_NOTE_SEGVGUARD;
				break;
			case segvguard_disabled:
				flags |= PAX_NOTE_NOSEGVGUARD;
				break;
			case aslr_enabled:
				flags |= PAX_NOTE_ASLR;
				break;
			case aslr_disabled:
				flags |= PAX_NOTE_NOASLR;
				break;
			case integriforce:
				err = do_integriforce_check(rule, &vap, imgp->vp, ucred);
				break;
#if __HardenedBSD_version > 21
			case shlibrandom_enabled:
				flags |= PAX_NOTE_SHLIBRANDOM;
				break;
			case shlibrandom_disabled:
				flags |= PAX_NOTE_NOSHLIBRANDOM;
				break;
#endif
			default:
				break;
			}
		}

		break;
	}

	SPL_RUNLOCK(entry, tracker);

	if (err == 0 && flags)
		err = pax_elf(imgp, flags);

	return (err);
}
Example #15
/*
 * check exec:
 * given an "executable" described in the exec package's namei info,
 * see what we can do with it.
 *
 * ON ENTRY:
 *	exec package with appropriate namei info
 *	proc pointer of exec'ing proc
 *	NO SELF-LOCKED VNODES
 *
 * ON EXIT:
 *	error:	nothing held, etc.  exec header still allocated.
 *	ok:	filled exec package, one locked vnode.
 *
 * EXEC SWITCH ENTRY:
 * 	Locked vnode to check, exec package, proc.
 *
 * EXEC SWITCH EXIT:
 *	ok:	return 0, filled exec package, one locked vnode.
 *	error:	destructive:
 *			everything deallocated except exec header.
 *		non-destructive:
 *			error code, locked vnode, exec header unmodified
 */
int
check_exec(struct proc *p, struct exec_package *epp)
{
	int error, i;
	struct vnode *vp;
	struct nameidata *ndp;
	size_t resid;

	ndp = epp->ep_ndp;
	ndp->ni_cnd.cn_nameiop = LOOKUP;
	ndp->ni_cnd.cn_flags = FOLLOW | LOCKLEAF | SAVENAME;
	/* first get the vnode */
	if ((error = namei(ndp)) != 0)
		return (error);
	epp->ep_vp = vp = ndp->ni_vp;

	/* check for regular file */
	if (vp->v_type == VDIR) {
		error = EISDIR;
		goto bad1;
	}
	if (vp->v_type != VREG) {
		error = EACCES;
		goto bad1;
	}

	/* get attributes */
	if ((error = VOP_GETATTR(vp, epp->ep_vap, p->p_ucred, p)) != 0)
		goto bad1;

	/* Check mount point */
	if (vp->v_mount->mnt_flag & MNT_NOEXEC) {
		error = EACCES;
		goto bad1;
	}

	if ((vp->v_mount->mnt_flag & MNT_NOSUID))
		epp->ep_vap->va_mode &= ~(VSUID | VSGID);

	/* check access.  for root we have to see if any exec bit on */
	if ((error = VOP_ACCESS(vp, VEXEC, p->p_ucred, p)) != 0)
		goto bad1;
	if ((epp->ep_vap->va_mode & (S_IXUSR | S_IXGRP | S_IXOTH)) == 0) {
		error = EACCES;
		goto bad1;
	}

	/* try to open it */
	if ((error = VOP_OPEN(vp, FREAD, p->p_ucred, p)) != 0)
		goto bad1;

	/* unlock vp, we need it unlocked from here */
	VOP_UNLOCK(vp, 0, p);

	/* now we have the file, get the exec header */
	error = vn_rdwr(UIO_READ, vp, epp->ep_hdr, epp->ep_hdrlen, 0,
	    UIO_SYSSPACE, 0, p->p_ucred, &resid, p);
	if (error)
		goto bad2;
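	/* hdrvalid is the number of header bytes actually read. */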
	epp->ep_hdrvalid = epp->ep_hdrlen - resid;

	/*
	 * set up the vmcmds for creation of the process
	 * address space
	 */
	error = ENOEXEC;
	for (i = 0; i < nexecs && error != 0; i++) {
		int newerror;

		if (execsw[i].es_check == NULL)
			continue;
		newerror = (*execsw[i].es_check)(p, epp);
		if (!newerror && !(epp->ep_emul->e_flags & EMUL_ENABLED))
			newerror = EPERM;
		/* make sure the first "interesting" error code is saved. */
		if (!newerror || error == ENOEXEC)
			error = newerror;
		if (epp->ep_flags & EXEC_DESTR && error != 0)
			return (error);
	}
	if (!error) {
		/* check that entry point is sane */
		if (epp->ep_entry > VM_MAXUSER_ADDRESS) {
			error = ENOEXEC;
		}

		/* check limits */
		if ((epp->ep_tsize > MAXTSIZ) ||
		    (epp->ep_dsize > p->p_rlimit[RLIMIT_DATA].rlim_cur))
			error = ENOMEM;

		if (!error)
			return (0);
	}

	/*
	 * free any vmspace-creation commands,
	 * and release their references
	 */
	kill_vmcmds(&epp->ep_vmcmds);

bad2:
	/*
	 * close the vnode, free the pathname buf, and punt.
	 */
	vn_close(vp, FREAD, p->p_ucred, p);
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	return (error);

bad1:
	/*
	 * free the namei pathname buffer, and put the vnode
	 * (which we don't yet have open).
	 */
	pool_put(&namei_pool, ndp->ni_cnd.cn_pnbuf);
	vput(vp);
	return (error);
}
Example #16
int
loadfirmware(const char *name, u_char **bufp, size_t *buflen)
{
	struct proc *p = curproc;
	struct nameidata nid;
	char *path, *ptr;
	struct iovec iov;
	struct uio uio;
	struct vattr va;
	int error;

	if (!rootvp || !vcount(rootvp))
		return (EIO);

	path = malloc(MAXPATHLEN, M_TEMP, M_NOWAIT);
	if (path == NULL)
		return (ENOMEM);

	if (snprintf(path, MAXPATHLEN, "/etc/firmware/%s", name) >=
	    MAXPATHLEN) {
		error = ENAMETOOLONG;
		goto err;
	}

	NDINIT(&nid, LOOKUP, NOFOLLOW|LOCKLEAF, UIO_SYSSPACE, path, p);
	error = namei(&nid);
	if (error)
		goto err;
	error = VOP_GETATTR(nid.ni_vp, &va, p->p_ucred);
	if (error)
		goto fail;
	if (nid.ni_vp->v_type != VREG || va.va_size == 0) {
		error = EINVAL;
		goto fail;
	}
	if (va.va_size > FIRMWARE_MAX) {
		error = E2BIG;
		goto fail;
	}
	ptr = malloc(va.va_size, M_DEVBUF, M_NOWAIT);
	if (ptr == NULL) {
		error = ENOMEM;
		goto fail;
	}

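	/* Read the whole file into the buffer with a single uio. */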
	iov.iov_base = ptr;
	iov.iov_len = va.va_size;
	uio.uio_iov = &iov;
	uio.uio_iovcnt = 1;
	uio.uio_offset = 0;
	uio.uio_resid = va.va_size;
	uio.uio_segflg = UIO_SYSSPACE;
	uio.uio_rw = UIO_READ;
	uio.uio_procp = p;

	error = VOP_READ(nid.ni_vp, &uio, 0, p->p_ucred);

	if (error == 0) {
		*bufp = ptr;
		*buflen = va.va_size;
	} else
		free(ptr, M_DEVBUF, va.va_size);

fail:
	vput(nid.ni_vp);
err:
	free(path, M_TEMP, MAXPATHLEN);
	return (error);
}
Example #17
/* ARGSUSED */
int
smbfs_open(void *v)
{
	struct vop_open_args /* {
		struct vnode *a_vp;
		int  a_mode;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct lwp *l = curlwp;
	struct vnode *vp = ap->a_vp;
	struct smbnode *np = VTOSMB(vp);
	struct smb_cred scred;
	struct vattr vattr;
	u_int32_t sv_caps = SMB_CAPS(SSTOVC(np->n_mount->sm_share));
	int error, accmode;

	SMBVDEBUG("%.*s,%d\n", (int) np->n_nmlen, np->n_name,
	    (np->n_flag & NOPEN) != 0);
	if (vp->v_type != VREG && vp->v_type != VDIR) {
		SMBFSERR("open eacces vtype=%d\n", vp->v_type);
		return EACCES;
	}
	if (vp->v_type == VDIR) {
		if ((sv_caps & SMB_CAP_NT_SMBS) == 0) {
			np->n_flag |= NOPEN;
			return 0;
		}
		goto do_open;	/* skip 'modified' check */
	}

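	/*
	 * Revalidate cached data: flush dirty buffers first, or toss
	 * the cache if the file changed on the server.
	 */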
	if (np->n_flag & NMODIFIED) {
		if ((error = smbfs_vinvalbuf(vp, V_SAVE, ap->a_cred, l, 1)) == EINTR)
			return error;
		smbfs_attr_cacheremove(vp);
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error)
			return error;
		np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
	} else {
		error = VOP_GETATTR(vp, &vattr, ap->a_cred);
		if (error)
			return error;
		if (np->n_mtime.tv_sec != vattr.va_mtime.tv_sec) {
			error = smbfs_vinvalbuf(vp, V_SAVE, ap->a_cred, l, 1);
			if (error == EINTR)
				return error;
			np->n_mtime.tv_sec = vattr.va_mtime.tv_sec;
		}
	}

do_open:
	if ((np->n_flag & NOPEN) != 0)
		return 0;

	smb_makescred(&scred, l, ap->a_cred);
	if (vp->v_type == VDIR)
		error = smbfs_smb_ntcreatex(np,
		    SMB_SM_DENYNONE|SMB_AM_OPENREAD, &scred);
	else {
		/*
		 * Use DENYNONE to give unixy semantics of permitting
		 * everything not forbidden by permissions.  I.e., denial
		 * is up to the server, with clients/openers needing to
		 * use advisory locks for further control.
		 */
		accmode = SMB_SM_DENYNONE|SMB_AM_OPENREAD;
		if ((vp->v_mount->mnt_flag & MNT_RDONLY) == 0)
			accmode = SMB_SM_DENYNONE|SMB_AM_OPENRW;
		error = smbfs_smb_open(np, accmode, &scred);
		if (error) {
			if (ap->a_mode & FWRITE)
				return EACCES;

			error = smbfs_smb_open(np,
			    SMB_SM_DENYNONE|SMB_AM_OPENREAD, &scred);
		}
	}
	if (!error)
		np->n_flag |= NOPEN;
	smbfs_attr_cacheremove(vp);
	return error;
}
Example #18
int
xfs_fhlookup (d_thread_t *proc,
	      struct xfs_fhandle_t *fhp,
	      struct vnode **vpp)
{
    int error;
    struct mount *mp;
#if !(defined(HAVE_GETFH) && defined(HAVE_FHOPEN))
    struct ucred *cred = proc->p_ucred;
    struct vattr vattr;
    fsid_t fsid;
    struct xfs_fh_args *fh_args = (struct xfs_fh_args *)fhp->fhdata;

    NNPFSDEB(XDEBVFOPS, ("xfs_fhlookup (xfs)\n"));

    error = xfs_suser (proc);
    if (error)
	return EPERM;

    if (fhp->len < sizeof(struct xfs_fh_args))
	return EINVAL;
    
    fsid = SCARG(fh_args, fsid);

    mp = xfs_vfs_getvfs (&fsid);
    if (mp == NULL)
	return ENXIO;

#ifdef __APPLE__
    {
	uint32_t ino = SCARG(fh_args, fileid);
	error = VFS_VGET(mp, &ino, vpp);
    }
#else
    error = VFS_VGET(mp, SCARG(fh_args, fileid), vpp);
#endif

    if (error)
	return error;

    if (*vpp == NULL)
	return ENOENT;

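    /*
     * Fetch the attributes so the generation count in the handle can
     * be compared against the current inode.
     */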
    error = VOP_GETATTR(*vpp, &vattr, cred, proc);
    if (error) {
	vput(*vpp);
	return error;
    }

    if (vattr.va_gen != SCARG(fh_args, gen)) {
	vput(*vpp);
	return ENOENT;
    }
#else /* HAVE_GETFH && HAVE_FHOPEN */
    {
	fhandle_t *fh = (fhandle_t *) fhp;

	NNPFSDEB(XDEBVFOPS, ("xfs_fhlookup (native)\n"));

	mp = xfs_vfs_getvfs (&fh->fh_fsid);
	if (mp == NULL)
	    return ESTALE;

	if ((error = VFS_FHTOVP(mp, &fh->fh_fid, vpp)) != 0) {
	    *vpp = NULL;
	    return error;
	}
    }
#endif  /* HAVE_GETFH && HAVE_FHOPEN */

#ifdef HAVE_KERNEL_VFS_OBJECT_CREATE
    if ((*vpp)->v_type == VREG && (*vpp)->v_object == NULL)
#ifdef HAVE_FREEBSD_THREAD
	xfs_vfs_object_create (*vpp, proc, proc->td_proc->p_ucred);
#else
	xfs_vfs_object_create (*vpp, proc, proc->p_ucred);
#endif
#elif __APPLE__
    if ((*vpp)->v_type == VREG && (!UBCINFOEXISTS(*vpp))) {
        ubc_info_init(*vpp);
    }
    ubc_hold(*vpp);
#endif
    return 0;
}
Example #19
/* Do a complete copyback. */
void
rf_CopybackReconstructedData(RF_Raid_t *raidPtr)
{
	RF_ComponentLabel_t c_label;
	int done, retcode;
	RF_CopybackDesc_t *desc;
	RF_RowCol_t frow, fcol;
	RF_RaidDisk_t *badDisk;
	char *databuf;

	struct partinfo dpart;
	struct vnode *vp;
	struct vattr va;
	struct proc *proc;

	int ac;

	done = 0;
	fcol = 0;
	for (frow = 0; frow < raidPtr->numRow; frow++) {
		for (fcol = 0; fcol < raidPtr->numCol; fcol++) {
			if (raidPtr->Disks[frow][fcol].status ==
			     rf_ds_dist_spared ||
			    raidPtr->Disks[frow][fcol].status ==
			     rf_ds_spared) {
				done = 1;
				break;
			}
		}
		if (done)
			break;
	}

	if (frow == raidPtr->numRow) {
		printf("COPYBACK: No disks need copyback.\n");
		return;
	}
	badDisk = &raidPtr->Disks[frow][fcol];

	proc = raidPtr->engine_thread;

	/*
	 * This device may have been opened successfully the first time.
	 * Close it before trying to open it again.
	 */

	if (raidPtr->raid_cinfo[frow][fcol].ci_vp != NULL) {
		printf("Close the opened device: %s.\n",
		    raidPtr->Disks[frow][fcol].devname);
 		vp = raidPtr->raid_cinfo[frow][fcol].ci_vp;
 		ac = raidPtr->Disks[frow][fcol].auto_configured;
 		rf_close_component(raidPtr, vp, ac);
		raidPtr->raid_cinfo[frow][fcol].ci_vp = NULL;

	}
 	/* Note that this disk was *not* auto_configured (any longer). */
 	raidPtr->Disks[frow][fcol].auto_configured = 0;

	printf("About to (re-)open the device: %s.\n",
	    raidPtr->Disks[frow][fcol].devname);

	retcode = raidlookup(raidPtr->Disks[frow][fcol].devname, proc, &vp);

	if (retcode) {
		printf("COPYBACK: raidlookup on device: %s failed: %d !\n",
		    raidPtr->Disks[frow][fcol].devname, retcode);

		/*
		 * XXX The component isn't responding properly... Must be
		 * still dead :-(
		 */
		return;

	} else {

		/*
		 * Ok, so we can at least do a lookup...
		 * How about actually getting a vp for it ?
		 */

		if ((retcode = VOP_GETATTR(vp, &va, proc->p_ucred, proc)) != 0)
		{
			return;
		}
		retcode = VOP_IOCTL(vp, DIOCGPART, (caddr_t) &dpart, FREAD,
		    proc->p_ucred, proc);
		if (retcode) {
			return;
		}
		raidPtr->Disks[frow][fcol].blockSize = dpart.disklab->d_secsize;

		raidPtr->Disks[frow][fcol].numBlocks = dpart.part->p_size -
		    rf_protectedSectors;

		raidPtr->raid_cinfo[frow][fcol].ci_vp = vp;
		raidPtr->raid_cinfo[frow][fcol].ci_dev = va.va_rdev;

		/* XXX Or the above ? */
		raidPtr->Disks[frow][fcol].dev = va.va_rdev;

		/*
		 * We allow the user to specify that only a fraction of
		 * the disks should be used; this is just for debugging,
		 * as it speeds up the parity scan.
		 */
		raidPtr->Disks[frow][fcol].numBlocks =
		    raidPtr->Disks[frow][fcol].numBlocks *
		    rf_sizePercentage / 100;
	}
#if 0
	/* This is the way it was done before the CAM stuff was removed. */

	if (rf_extract_ids(badDisk->devname, &bus, &targ, &lun)) {
		printf("COPYBACK: unable to extract bus, target, lun from"
		    " devname %s.\n", badDisk->devname);
		return;
	}
	/*
	 * TUR the disk that's marked as bad to be sure that it's actually
	 * alive.
	 */
	rf_SCSI_AllocTUR(&tur_op);
	retcode = rf_SCSI_DoTUR(tur_op, bus, targ, lun, badDisk->dev);
	rf_SCSI_FreeDiskOp(tur_op, 0);
#endif

	/*
	 * With the TUR code above compiled out, retcode is still 0 at
	 * this point; this check only matters if that code returns.
	 */
	if (retcode) {
		printf("COPYBACK: target disk failed TUR.\n");
		return;
	}
	/* Get a buffer to hold one SU. */
	RF_Malloc(databuf, rf_RaidAddressToByte(raidPtr,
	    raidPtr->Layout.sectorsPerStripeUnit), (char *));

	/* Create a descriptor. */
	RF_Malloc(desc, sizeof(*desc), (RF_CopybackDesc_t *));
	desc->raidPtr = raidPtr;
	desc->status = 0;
	desc->frow = frow;
	desc->fcol = fcol;
	desc->spRow = badDisk->spareRow;
	desc->spCol = badDisk->spareCol;
	desc->stripeAddr = 0;
	desc->sectPerSU = raidPtr->Layout.sectorsPerStripeUnit;
	desc->sectPerStripe = raidPtr->Layout.sectorsPerStripeUnit *
	    raidPtr->Layout.numDataCol;
	desc->databuf = databuf;
	desc->mcpair = rf_AllocMCPair();

	printf("COPYBACK: Quiescing the array.\n");
	/*
	 * Quiesce the array, since we don't want to code support for user
	 * accesses here.
	 */
	rf_SuspendNewRequestsAndWait(raidPtr);

	/* Adjust state of the array and of the disks. */
	RF_LOCK_MUTEX(raidPtr->mutex);
	raidPtr->Disks[desc->frow][desc->fcol].status = rf_ds_optimal;
	raidPtr->status[desc->frow] = rf_rs_optimal;
	rf_copyback_in_progress = 1;	/* Debug only. */
	RF_UNLOCK_MUTEX(raidPtr->mutex);

	printf("COPYBACK: Beginning\n");
	RF_GETTIME(desc->starttime);
	rf_ContinueCopyback(desc);

	/*
	 * Data has been restored.
	 * Fix up the component label.
	 * Don't actually need the read here.
	 */
	raidread_component_label(raidPtr->raid_cinfo[frow][fcol].ci_dev,
				 raidPtr->raid_cinfo[frow][fcol].ci_vp,
				 &c_label);

	raid_init_component_label(raidPtr, &c_label);

	c_label.row = frow;
	c_label.column = fcol;

	raidwrite_component_label(raidPtr->raid_cinfo[frow][fcol].ci_dev,
				  raidPtr->raid_cinfo[frow][fcol].ci_vp,
				  &c_label);
}
Exemplo n.º 20
0
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp,
	const char *name, int nmlen, struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode_hashhead *nhpp;
	struct smbnode *np, *np2, *dnp;
	struct vnode *vp;
	u_long hashval;
	int error;

	*vpp = NULL;
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	hashval = smbfs_hash(name, nmlen);
retry:
	smbfs_hash_lock(smp);
loop:
	nhpp = SMBFS_NOHASH(smp, hashval);
	LIST_FOREACH(np, nhpp, n_hash) {
		vp = SMBTOV(np);
		if (np->n_parent != dvp ||
		    np->n_nmlen != nmlen || bcmp(name, np->n_name, nmlen) != 0)
			continue;
		VI_LOCK(vp);
		smbfs_hash_unlock(smp);
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td) != 0)
			goto retry;
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(vp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if ((vp->v_type == VDIR && (np->n_dosattr & SMB_FA_DIR) == 0) ||
		    (vp->v_type == VREG && (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(vp);
			vput(vp);
			break;
		}
		*vpp = vp;
		return 0;
	}
Exemplo n.º 21
0
/* ARGSUSED */
static int
xattr_fill_nvlist(vnode_t *vp, xattr_view_t xattr_view, nvlist_t *nvlp,
    cred_t *cr, caller_context_t *ct)
{
	int error;
	f_attr_t attr;
	uint64_t fsid;
	xvattr_t xvattr;
	xoptattr_t *xoap;	/* Pointer to optional attributes */
	vnode_t *ppvp;
	const char *domain;
	uint32_t rid;

	xva_init(&xvattr);

	if ((xoap = xva_getxoptattr(&xvattr)) == NULL)
		return (EINVAL);

	/*
	 * For detecting ephemeral uid/gid
	 */
	xvattr.xva_vattr.va_mask |= (AT_UID|AT_GID);

	/*
	 * We need to access the real fs object.
	 * vp points to a GFS file; ppvp points to the real object.
	 */
	ppvp = gfs_file_parent(gfs_file_parent(vp));

	/*
	 * Iterate through the attrs associated with this view
	 */

	for (attr = 0; attr < F_ATTR_ALL; attr++) {
		if (xattr_view != attr_to_xattr_view(attr)) {
			continue;
		}

		switch (attr) {
		case F_SYSTEM:
			XVA_SET_REQ(&xvattr, XAT_SYSTEM);
			break;
		case F_READONLY:
			XVA_SET_REQ(&xvattr, XAT_READONLY);
			break;
		case F_HIDDEN:
			XVA_SET_REQ(&xvattr, XAT_HIDDEN);
			break;
		case F_ARCHIVE:
			XVA_SET_REQ(&xvattr, XAT_ARCHIVE);
			break;
		case F_IMMUTABLE:
			XVA_SET_REQ(&xvattr, XAT_IMMUTABLE);
			break;
		case F_APPENDONLY:
			XVA_SET_REQ(&xvattr, XAT_APPENDONLY);
			break;
		case F_NOUNLINK:
			XVA_SET_REQ(&xvattr, XAT_NOUNLINK);
			break;
		case F_OPAQUE:
			XVA_SET_REQ(&xvattr, XAT_OPAQUE);
			break;
		case F_NODUMP:
			XVA_SET_REQ(&xvattr, XAT_NODUMP);
			break;
		case F_AV_QUARANTINED:
			XVA_SET_REQ(&xvattr, XAT_AV_QUARANTINED);
			break;
		case F_AV_MODIFIED:
			XVA_SET_REQ(&xvattr, XAT_AV_MODIFIED);
			break;
		case F_AV_SCANSTAMP:
			if (ppvp->v_type == VREG)
				XVA_SET_REQ(&xvattr, XAT_AV_SCANSTAMP);
			break;
		case F_CRTIME:
			XVA_SET_REQ(&xvattr, XAT_CREATETIME);
			break;
		case F_FSID:
			fsid = (((uint64_t)vp->v_vfsp->vfs_fsid.val[0] << 32) |
			    (uint64_t)(vp->v_vfsp->vfs_fsid.val[1] &
			    0xffffffff));
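			/*
			 * e.g. (illustrative values) val[0] 0x12345678
			 * and val[1] 0x9abcdef0 pack into the single
			 * value 0x123456789abcdef0.
			 */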
			VERIFY(nvlist_add_uint64(nvlp, attr_to_name(attr),
			    fsid) == 0);
			break;
		case F_REPARSE:
			XVA_SET_REQ(&xvattr, XAT_REPARSE);
			break;
		case F_GEN:
			XVA_SET_REQ(&xvattr, XAT_GEN);
			break;
		case F_OFFLINE:
			XVA_SET_REQ(&xvattr, XAT_OFFLINE);
			break;
		case F_SPARSE:
			XVA_SET_REQ(&xvattr, XAT_SPARSE);
			break;
		default:
			break;
		}
	}

	error = VOP_GETATTR(ppvp, &xvattr.xva_vattr, 0, cr, ct);
	if (error)
		return (error);

	/*
	 * Process all the optional attributes together here.  Notice that
	 * xoap was set when the optional attribute bits were set above.
	 */
	if ((xvattr.xva_vattr.va_mask & AT_XVATTR) && xoap) {
		if (XVA_ISSET_RTN(&xvattr, XAT_READONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_READONLY),
			    xoap->xoa_readonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_HIDDEN)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_HIDDEN),
			    xoap->xoa_hidden) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SYSTEM)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SYSTEM),
			    xoap->xoa_system) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_ARCHIVE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_ARCHIVE),
			    xoap->xoa_archive) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_IMMUTABLE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_IMMUTABLE),
			    xoap->xoa_immutable) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NOUNLINK)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NOUNLINK),
			    xoap->xoa_nounlink) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_APPENDONLY)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_APPENDONLY),
			    xoap->xoa_appendonly) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_NODUMP)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_NODUMP),
			    xoap->xoa_nodump) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OPAQUE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OPAQUE),
			    xoap->xoa_opaque) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_QUARANTINED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_QUARANTINED),
			    xoap->xoa_av_quarantined) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_MODIFIED)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_AV_MODIFIED),
			    xoap->xoa_av_modified) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_AV_SCANSTAMP)) {
			VERIFY(nvlist_add_uint8_array(nvlp,
			    attr_to_name(F_AV_SCANSTAMP),
			    xoap->xoa_av_scanstamp,
			    sizeof (xoap->xoa_av_scanstamp)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_CREATETIME)) {
			VERIFY(nvlist_add_uint64_array(nvlp,
			    attr_to_name(F_CRTIME),
			    (uint64_t *)&(xoap->xoa_createtime),
			    sizeof (xoap->xoa_createtime) /
			    sizeof (uint64_t)) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_REPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_REPARSE),
			    xoap->xoa_reparse) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_GEN)) {
			VERIFY(nvlist_add_uint64(nvlp,
			    attr_to_name(F_GEN),
			    xoap->xoa_generation) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_OFFLINE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_OFFLINE),
			    xoap->xoa_offline) == 0);
		}
		if (XVA_ISSET_RTN(&xvattr, XAT_SPARSE)) {
			VERIFY(nvlist_add_boolean_value(nvlp,
			    attr_to_name(F_SPARSE),
			    xoap->xoa_sparse) == 0);
		}
	}
	/*
	 * Check for optional ownersid/groupsid
	 */

	if (xvattr.xva_vattr.va_uid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbyuid(crgetzone(cr), xvattr.xva_vattr.va_uid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp, attr_to_name(F_OWNERSID),
			    nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}
	if (xvattr.xva_vattr.va_gid > MAXUID) {
		nvlist_t *nvl_sid;

		if (nvlist_alloc(&nvl_sid, NV_UNIQUE_NAME, KM_SLEEP))
			return (ENOMEM);

		if (kidmap_getsidbygid(crgetzone(cr), xvattr.xva_vattr.va_gid,
		    &domain, &rid) == 0) {
			VERIFY(nvlist_add_string(nvl_sid,
			    SID_DOMAIN, domain) == 0);
			VERIFY(nvlist_add_uint32(nvl_sid, SID_RID, rid) == 0);
			VERIFY(nvlist_add_nvlist(nvlp, attr_to_name(F_GROUPSID),
			    nvl_sid) == 0);
		}
		nvlist_free(nvl_sid);
	}

	return (0);
}
Exemplo n.º 22
0
static int
sysctl_integriforce_so(SYSCTL_HANDLER_ARGS)
{
    integriforce_so_check_t *integriforce_so;
    secadm_prison_entry_t *entry;
    secadm_rule_t r, *rule;
    struct nameidata nd;
    struct vattr vap;
    secadm_key_t key;
    int err;

    if (!(req->newptr) || req->newlen != sizeof(integriforce_so_check_t))
        return (EINVAL);

    if (!(req->oldptr) || req->oldlen != sizeof(integriforce_so_check_t))
        return (EINVAL);

    integriforce_so = malloc(sizeof(integriforce_so_check_t), M_SECADM, M_WAITOK);

    err = SYSCTL_IN(req, integriforce_so, sizeof(integriforce_so_check_t));
    if (err) {
        free(integriforce_so, M_SECADM);
        return (err);
    }

    NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, integriforce_so->isc_path, req->td);
    err = namei(&nd);
    if (err) {
        free(integriforce_so, M_SECADM);
        NDFREE(&nd, 0);
        return (err);
    }

    if ((err = vn_lock(nd.ni_vp, LK_SHARED | LK_RETRY)) != 0) {
        free(integriforce_so, M_SECADM);
        NDFREE(&nd, 0);
        return (err);
    }

    err = VOP_GETATTR(nd.ni_vp, &vap, req->td->td_ucred);
    if (err) {
        free(integriforce_so, M_SECADM);
        NDFREE(&nd, 0);
        return (err);
    }

    VOP_UNLOCK(nd.ni_vp, 0);

    key.sk_jid = req->td->td_ucred->cr_prison->pr_id;
    key.sk_type = secadm_integriforce_rule;
    key.sk_fileid = vap.va_fileid;
    strncpy(key.sk_mntonname,
            nd.ni_vp->v_mount->mnt_stat.f_mntonname, MNAMELEN);
    r.sr_key = fnv_32_buf(&key, sizeof(secadm_key_t), FNV1_32_INIT);

    entry = get_prison_list_entry(
                req->td->td_ucred->cr_prison->pr_id);

    PE_RLOCK(entry);
    rule = RB_FIND(secadm_rules_tree, &(entry->sp_rules), &r);

    if (rule) {
        integriforce_so->isc_result =
            do_integriforce_check(rule, &vap, nd.ni_vp,
                                  req->td->td_ucred);
    }

    PE_RUNLOCK(entry);

    SYSCTL_OUT(req, integriforce_so, sizeof(integriforce_so_check_t));
    free(integriforce_so, M_SECADM);

    NDFREE(&nd, 0);

    return (0);
}
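A minimal userland sketch of driving this handler. The sysctl name
"hardenedbsd.secadm.integriforce_so" and the secadm.h header are
assumptions; isc_path and isc_result come from the handler above, which
requires the old and new buffers to both be exactly
sizeof(integriforce_so_check_t):

#include <sys/types.h>
#include <sys/sysctl.h>
#include <string.h>

#include <secadm.h>	/* assumed userland header for integriforce_so_check_t */

int
check_so(const char *path)
{
	integriforce_so_check_t check;
	size_t sz = sizeof(check);

	memset(&check, 0, sizeof(check));
	strlcpy(check.isc_path, path, sizeof(check.isc_path));

	/* The handler demands the struct in both directions. */
	if (sysctlbyname("hardenedbsd.secadm.integriforce_so",
	    &check, &sz, &check, sizeof(check)) != 0)
		return (-1);

	return (check.isc_result);	/* verdict filled in by the kernel */
}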
Exemplo n.º 23
0
int
puffs_bioread(struct vnode *vp, struct uio *uio, int ioflag,
    struct ucred *cred)
{
	int biosize = vp->v_mount->mnt_stat.f_iosize;
	struct buf *bp;
	struct vattr vattr;
	off_t lbn, loffset, fsize;
	size_t n;
	int boff;
	int error = 0;

	KKASSERT(uio->uio_rw == UIO_READ);
	KKASSERT(vp->v_type == VREG);

	if (uio->uio_offset < 0)
		return EINVAL;
	if (uio->uio_resid == 0)
		return 0;

	/*
	 * Cache consistency can only be maintained approximately.
	 *
	 * GETATTR is called to synchronize the file size.
	 *
	 * NOTE: In the normal case the attribute cache is not
	 * cleared which means GETATTR may use cached data and
	 * not immediately detect changes made on the server.
	 */

	error = VOP_GETATTR(vp, &vattr);
	if (error)
		return error;

	/*
	 * Loop until uio exhausted or we hit EOF
	 */
	do {
		bp = NULL;

		lbn = uio->uio_offset / biosize;
		boff = uio->uio_offset & (biosize - 1);
		loffset = lbn * biosize;
		fsize = puffs_meta_getsize(vp);

		if (loffset + boff >= fsize) {
			n = 0;
			break;
		}
		bp = getblk(vp, loffset, biosize, 0, 0);

		if (bp == NULL)
			return EINTR;

		/*
		 * If B_CACHE is not set, we must issue the read.  If this
		 * fails, we return an error.
		 */
		if ((bp->b_flags & B_CACHE) == 0) {
			bp->b_cmd = BUF_CMD_READ;
			bp->b_bio2.bio_done = puffs_iodone;
			bp->b_bio2.bio_flags |= BIO_SYNC;
			vfs_busy_pages(vp, bp);
			error = puffs_doio(vp, &bp->b_bio2, uio->uio_td);
			if (error) {
				brelse(bp);
				return error;
			}
		}

		/*
		 * boff is the offset into the current bp.  Figure out how
		 * many bytes we can copy out of the bp; note that the
		 * resulting copy length need not be DEV_BSIZE aligned.
		 *
		 * Then figure out how many bytes we can copy into the uio.
		 */
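		/*
		 * Worked example (illustrative values): with biosize 8192
		 * and uio_offset 12288, lbn = 1, boff = 4096 and
		 * loffset = 8192, so at most 8192 - 4096 = 4096 bytes can
		 * come out of this buffer.
		 */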
		n = biosize - boff;
		if (n > uio->uio_resid)
			n = uio->uio_resid;
		if (loffset + boff + n > fsize)
			n = fsize - loffset - boff;

		if (n > 0)
			error = uiomove(bp->b_data + boff, n, uio);
		if (bp)
			brelse(bp);
	} while (error == 0 && uio->uio_resid > 0 && n > 0);

	return error;
}
Exemplo n.º 24
0
static int
zfs_vfs_mount(struct mount *mp, vnode_t devvp, user_addr_t data, vfs_context_t context)
{
	char	*osname = NULL;
	size_t  osnamelen = 0;
	int		error = 0;
	int		canwrite;
	/*
	 * Get the objset name (the "special" mount argument).
	 * The filesystem that we mount as root is defined in the
	 * "zfs-bootfs" property. 
	 */
	if (data) {
		user_addr_t fspec = USER_ADDR_NULL;
#ifndef __APPLE__
		if (ddi_prop_lookup_string(DDI_DEV_T_ANY, ddi_root_node(),
		    DDI_PROP_DONTPASS, "zfs-bootfs", &zfs_bootpath) !=
		    DDI_SUCCESS)
			return (EIO);

		error = parse_bootpath(zfs_bootpath, rootfs.bo_name);
		ddi_prop_free(zfs_bootpath);
#endif
		osname = kmem_alloc(MAXPATHLEN, KM_SLEEP);

		if (vfs_context_is64bit(context)) {
			if ( (error = copyin(data, (caddr_t)&fspec, sizeof(fspec))) )
				goto out;	
		} else {
#ifdef ZFS_LEOPARD_ONLY
			char *tmp;
#else
			user32_addr_t tmp;
#endif
			if ( (error = copyin(data, (caddr_t)&tmp, sizeof(tmp))) )
				goto out;	
			/* munge into LP64 addr */
			fspec = CAST_USER_ADDR_T(tmp);
		}
		if ( (error = copyinstr(fspec, osname, MAXPATHLEN, &osnamelen)) )
			goto out;
	}

#if 0
	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	mutex_enter(&mvp->v_lock);
	if ((uap->flags & MS_REMOUNT) == 0 &&
	    (uap->flags & MS_OVERLAY) == 0 &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * ZFS does not support passing unparsed data in via MS_DATA.
	 * Users should use the MS_OPTIONSTR interface; this means
	 * that all option parsing is already done and the options struct
	 * can be interrogated.
	 */
	if ((uap->flags & MS_DATA) && uap->datalen > 0)
		return (EINVAL);

	/*
	 * Get the objset name (the "special" mount argument).
	 */
	if (error = pn_get(uap->spec, fromspace, &spn))
		return (error);

	osname = spn.pn_path;
#endif
	/*
	 * Check for mount privilege?
	 *
	 * If we don't have privilege then see if
	 * we have local permission to allow it
	 */
#ifndef __APPLE__
	error = secpolicy_fs_mount(cr, mvp, vfsp);
	if (error) {
		error = dsl_deleg_access(osname, ZFS_DELEG_PERM_MOUNT, cr);
		if (error == 0) {
			vattr_t		vattr;

			/*
			 * Make sure user is the owner of the mount point
			 * or has sufficient privileges.
			 */

			vattr.va_mask = AT_UID;

			if (error = VOP_GETATTR(mvp, &vattr, 0, cr)) {
				goto out;
			}

			if (error = secpolicy_vnode_owner(cr, vattr.va_uid)) {
				goto out;
			}

			if (error = VOP_ACCESS(mvp, VWRITE, 0, cr)) {
				goto out;
			}

			secpolicy_fs_mount_clearopts(cr, vfsp);
		} else {
			goto out;
		}
	}
#endif

	error = zfs_domount(mp, 0, osname, context);
	if (error)
		printf("zfs_vfs_mount: error %d\n", error);
	if (error == 0) {
		zfsvfs_t *zfsvfs = NULL;

		/* Make the Finder treat sub file systems just like a folder */
		if (strpbrk(osname, "/"))
			vfs_setflags(mp, (u_int64_t)((unsigned int)MNT_DONTBROWSE));

		/* Indicate to VFS that we support ACLs. */
		vfs_setextendedsecurity(mp);

		/* Advisory locking should be handled at the VFS layer */
		vfs_setlocklocal(mp);

		/*
		 * Mac OS X needs a file system modify time
		 *
		 * We use the mtime of the "com.apple.system.mtime" 
		 * extended attribute, which is associated with the
		 * file system root directory.
		 *
		 * Here we need to take a ref on z_mtime_vp to keep it around.
		 * If the attribute isn't there, attempt to create it.
		 */
		zfsvfs = vfs_fsprivate(mp);
		if (zfsvfs->z_mtime_vp == NULL) {
			struct vnode * rvp;
			struct vnode *xdvp = NULLVP;
			struct vnode *xvp = NULLVP;
			znode_t *rootzp;
			timestruc_t modify_time;
			cred_t  *cr;
			timestruc_t  now;
			int flag;
			int result;

			if (zfs_zget(zfsvfs, zfsvfs->z_root, &rootzp) != 0) {
				goto out;
			}
			rvp = ZTOV(rootzp);
			cr = (cred_t *)vfs_context_ucred(context);

			/* Grab the hidden attribute directory vnode. */
			result = zfs_get_xattrdir(rootzp, &xdvp, cr, CREATE_XATTR_DIR);
			vnode_put(rvp);	/* all done with root vnode */
			rvp = NULL;
			if (result) {
				goto out;
			}

			/*
			 * HACK - workaround missing vnode_setnoflush() KPI...
			 *
			 * We tag zfsvfs so that zfs_attach_vnode() can then set
			 * vnfs_marksystem when the vnode gets created.
			 */
			zfsvfs->z_last_unmount_time = 0xBADC0DE;
			zfsvfs->z_last_mtime_synced = VTOZ(xdvp)->z_id;
			flag = vfs_isrdonly(mp) ? 0 : ZEXISTS;
			/* Lookup or create the named attribute. */
			if ( zfs_obtain_xattr(VTOZ(xdvp), ZFS_MTIME_XATTR,
			                      S_IRUSR | S_IWUSR, cr, &xvp,
			                      flag) ) {
				zfsvfs->z_last_unmount_time = 0;
				zfsvfs->z_last_mtime_synced = 0;
				vnode_put(xdvp);
				goto out;
			}
			gethrestime(&now);
			ZFS_TIME_ENCODE(&now, VTOZ(xvp)->z_phys->zp_mtime);
			vnode_put(xdvp);
			vnode_ref(xvp);

			zfsvfs->z_mtime_vp = xvp;
			ZFS_TIME_DECODE(&modify_time, VTOZ(xvp)->z_phys->zp_mtime);
			zfsvfs->z_last_unmount_time = modify_time.tv_sec;
			zfsvfs->z_last_mtime_synced = modify_time.tv_sec;

			/*
			 * Keep this referenced vnode from impeding an unmount.
			 *
			 * XXX vnode_setnoflush() is MIA from KPI (see workaround above).
			 */
#if 0
			vnode_setnoflush(xvp);
#endif
			vnode_put(xvp);
		}
	}
out:
	if (osname) {
		kmem_free(osname, MAXPATHLEN);
	}
	return (error);
}
Exemplo n.º 25
0
static int
coff_load_file(struct thread *td, char *name)
{
	struct proc *p = td->td_proc;
  	struct vmspace *vmspace = p->p_vmspace;
  	int error;
  	struct nameidata nd;
  	struct vnode *vp;
  	struct vattr attr;
  	struct filehdr *fhdr;
  	struct aouthdr *ahdr;
  	struct scnhdr *scns;
  	char *ptr = 0;
  	int nscns;
  	unsigned long text_offset = 0, text_address = 0, text_size = 0;
  	unsigned long data_offset = 0, data_address = 0, data_size = 0;
  	unsigned long bss_size = 0;
  	int i;

	NDINIT(&nd, LOOKUP, ISOPEN | LOCKLEAF | FOLLOW | SAVENAME,
	    UIO_SYSSPACE, name, td);

  	error = namei(&nd);
  	if (error)
    		return error;

  	vp = nd.ni_vp;
  	if (vp == NULL)
    		return ENOEXEC;

  	if (vp->v_writecount) {
    		error = ETXTBSY;
    		goto fail;
  	}

  	if ((error = VOP_GETATTR(vp, &attr, td->td_ucred, td)) != 0)
    		goto fail;

  	if ((vp->v_mount->mnt_flag & MNT_NOEXEC)
	    || ((attr.va_mode & 0111) == 0)
	    || (attr.va_type != VREG)) {
		/* Not executable here; don't return the stale 0 error. */
		error = ENOEXEC;
    		goto fail;
	}

  	if (attr.va_size == 0) {
    		error = ENOEXEC;
    		goto fail;
  	}

  	if ((error = VOP_ACCESS(vp, VEXEC, td->td_ucred, td)) != 0)
    		goto fail;

  	if ((error = VOP_OPEN(vp, FREAD, td->td_ucred, td, NULL)) != 0)
    		goto fail;

	/*
	 * Lose the lock on the vnode. It's no longer needed, and must not
	 * exist for the pagefault paging to work below.
	 */
	VOP_UNLOCK(vp, 0, td);

  	if ((error = vm_mmap(kernel_map,
			    (vm_offset_t *) &ptr,
			    PAGE_SIZE,
			    VM_PROT_READ,
		       	    VM_PROT_READ,
			    0,
			    OBJT_VNODE,
			    vp,
			    0)) != 0)
		goto unlocked_fail;

  	fhdr = (struct filehdr *)ptr;

  	if (fhdr->f_magic != I386_COFF) {
    		error = ENOEXEC;
    		goto dealloc_and_fail;
  	}

  	nscns = fhdr->f_nscns;

  	if ((nscns * sizeof(struct scnhdr)) > PAGE_SIZE) {
    		/*
     		 * XXX -- just fail.  I'm so lazy.
     		 */
    		error = ENOEXEC;
    		goto dealloc_and_fail;
  	}

  	ahdr = (struct aouthdr*)(ptr + sizeof(struct filehdr));

  	scns = (struct scnhdr*)(ptr + sizeof(struct filehdr)
			  + sizeof(struct aouthdr));

  	for (i = 0; i < nscns; i++) {
    		if (scns[i].s_flags & STYP_NOLOAD)
      			continue;
    		else if (scns[i].s_flags & STYP_TEXT) {
      			text_address = scns[i].s_vaddr;
      			text_size = scns[i].s_size;
      			text_offset = scns[i].s_scnptr;
    		}
		else if (scns[i].s_flags & STYP_DATA) {
      			data_address = scns[i].s_vaddr;
      			data_size = scns[i].s_size;
      			data_offset = scns[i].s_scnptr;
    		} else if (scns[i].s_flags & STYP_BSS) {
      			bss_size = scns[i].s_size;
    		}
  	}

  	if ((error = load_coff_section(vmspace, vp, text_offset,
				      (caddr_t)(void *)(uintptr_t)text_address,
				      text_size, text_size,
				      VM_PROT_READ | VM_PROT_EXECUTE)) != 0) {
    		goto dealloc_and_fail;
  	}
  	if ((error = load_coff_section(vmspace, vp, data_offset,
				      (caddr_t)(void *)(uintptr_t)data_address,
				      data_size + bss_size, data_size,
				      VM_PROT_ALL)) != 0) {
    		goto dealloc_and_fail;
  	}

  	error = 0;

 dealloc_and_fail:
	if (vm_map_remove(kernel_map,
			  (vm_offset_t) ptr,
			  (vm_offset_t) ptr + PAGE_SIZE))
    		panic("%s vm_map_remove failed", __func__);
	/* The vnode was already unlocked for vm_mmap(); don't unlock twice. */
	goto unlocked_fail;

 fail:
	VOP_UNLOCK(vp, 0, td);
 unlocked_fail:
	NDFREE(&nd, NDF_ONLY_PNBUF);
	vrele(nd.ni_vp);
  	return error;
}
Exemplo n.º 26
0
static int
smbfs_node_alloc(struct mount *mp, struct vnode *dvp, const char *dirnm, 
	int dirlen, const char *name, int nmlen, char sep, 
	struct smbfattr *fap, struct vnode **vpp)
{
	struct vattr vattr;
	struct thread *td = curthread;	/* XXX */
	struct smbmount *smp = VFSTOSMBFS(mp);
	struct smbnode *np, *dnp;
	struct vnode *vp, *vp2;
	struct smbcmp sc;
	char *p, *rpath;
	int error, rplen;

	sc.n_parent = dvp;
	sc.n_nmlen = nmlen;
	sc.n_name = name;	
	if (smp->sm_root != NULL && dvp == NULL) {
		SMBERROR("do not allocate root vnode twice!\n");
		return EINVAL;
	}
	if (nmlen == 2 && bcmp(name, "..", 2) == 0) {
		if (dvp == NULL)
			return EINVAL;
		vp = VTOSMB(VTOSMB(dvp)->n_parent)->n_vnode;
		error = vget(vp, LK_EXCLUSIVE, td);
		if (error == 0)
			*vpp = vp;
		return error;
	} else if (nmlen == 1 && name[0] == '.') {
		SMBERROR("do not call me with dot!\n");
		return EINVAL;
	}
	dnp = dvp ? VTOSMB(dvp) : NULL;
	if (dnp == NULL && dvp != NULL) {
		vprint("smbfs_node_alloc: dead parent vnode", dvp);
		return EINVAL;
	}
	error = vfs_hash_get(mp, smbfs_hash(name, nmlen), LK_EXCLUSIVE, td,
	    vpp, smbfs_vnode_cmp, &sc);
	if (error)
		return (error);
	if (*vpp) {
		np = VTOSMB(*vpp);
		/* Force cached attributes to be refreshed if stale. */
		(void)VOP_GETATTR(*vpp, &vattr, td->td_ucred);
		/*
		 * If the file type on the server is inconsistent with
		 * what it was when we created the vnode, kill the
		 * bogus vnode now and fall through to the code below
		 * to create a new one with the right type.
		 */
		if (((*vpp)->v_type == VDIR && 
		    (np->n_dosattr & SMB_FA_DIR) == 0) ||
	    	    ((*vpp)->v_type == VREG && 
		    (np->n_dosattr & SMB_FA_DIR) != 0)) {
			vgone(*vpp);
			vput(*vpp);
		}
		else {
			SMBVDEBUG("vnode taken from the hashtable\n");
			return (0);
		}
	}
	/*
	 * If we don't have node attributes, then it is an explicit lookup
	 * for an existing vnode.
	 */
	if (fap == NULL)
		return ENOENT;

	error = getnewvnode("smbfs", mp, &smbfs_vnodeops, vpp);
	if (error)
		return (error);
	vp = *vpp;
	np = malloc(sizeof *np, M_SMBNODE, M_WAITOK | M_ZERO);
	rplen = dirlen;
	if (sep != '\0')
		rplen++;
	rplen += nmlen;
	rpath = malloc(rplen + 1, M_SMBNODENAME, M_WAITOK);
	p = rpath;
	bcopy(dirnm, p, dirlen);
	p += dirlen;
	if (sep != '\0')
		*p++ = sep;
	if (name != NULL) {
		bcopy(name, p, nmlen);
		p += nmlen;
	}
	*p = '\0';
	MPASS(p == rpath + rplen);
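	/*
	 * e.g. (illustrative values) dirnm "\share\dir" (dirlen 10) with
	 * sep '\' and name "file" (nmlen 4) yields rpath
	 * "\share\dir\file", rplen 15.
	 */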
	lockmgr(vp->v_vnlock, LK_EXCLUSIVE, NULL);
	/* Vnode initialization */
	vp->v_type = fap->fa_attr & SMB_FA_DIR ? VDIR : VREG;
	vp->v_data = np;
	np->n_vnode = vp;
	np->n_mount = VFSTOSMBFS(mp);
	np->n_rpath = rpath;
	np->n_rplen = rplen;
	np->n_nmlen = nmlen;
	np->n_name = smbfs_name_alloc(name, nmlen);
	np->n_ino = fap->fa_ino;
	if (dvp) {
		ASSERT_VOP_LOCKED(dvp, "smbfs_node_alloc");
		np->n_parent = dvp;
		np->n_parentino = VTOSMB(dvp)->n_ino;
		if (/*vp->v_type == VDIR &&*/ (dvp->v_vflag & VV_ROOT) == 0) {
			vref(dvp);
			np->n_flag |= NREFPARENT;
		}
	} else if (vp->v_type == VREG)
		SMBERROR("new vnode '%s' born without parent ?\n", np->n_name);
	error = insmntque(vp, mp);
	if (error) {
		free(np, M_SMBNODE);
		return (error);
	}
	error = vfs_hash_insert(vp, smbfs_hash(name, nmlen), LK_EXCLUSIVE,
	    td, &vp2, smbfs_vnode_cmp, &sc);
	if (error) 
		return (error);
	if (vp2 != NULL)
		*vpp = vp2;
	return (0);
}
Exemplo n.º 27
0
STATIC int
xfs_ioc_xattr(
	vnode_t			*vp,
	xfs_inode_t		*ip,
	struct file		*filp,
	unsigned int		cmd,
	unsigned long		arg)
{
	struct fsxattr		fa;
	vattr_t			va;
	int			error;
	int			attr_flags;
	unsigned int		flags;

	switch (cmd) {
	case XFS_IOC_FSGETXATTR: {
		va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_NEXTENTS;
		VOP_GETATTR(vp, &va, 0, NULL, error);
		if (error)
			return -error;

		fa.fsx_xflags	= va.va_xflags;
		fa.fsx_extsize	= va.va_extsize;
		fa.fsx_nextents = va.va_nextents;

		if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa)))
			return -XFS_ERROR(EFAULT);
		return 0;
	}

	case XFS_IOC_FSSETXATTR: {
		if (copy_from_user(&fa, (struct fsxattr *)arg, sizeof(fa)))
			return -XFS_ERROR(EFAULT);

		attr_flags = 0;
		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		va.va_mask = XFS_AT_XFLAGS | XFS_AT_EXTSIZE;
		va.va_xflags  = fa.fsx_xflags;
		va.va_extsize = fa.fsx_extsize;

		VOP_SETATTR(vp, &va, attr_flags, NULL, error);
		if (!error)
			vn_revalidate(vp);	/* update Linux inode flags */
		return -error;
	}

	case XFS_IOC_FSGETXATTRA: {
		va.va_mask = XFS_AT_XFLAGS|XFS_AT_EXTSIZE|XFS_AT_ANEXTENTS;
		VOP_GETATTR(vp, &va, 0, NULL, error);
		if (error)
			return -error;

		fa.fsx_xflags	= va.va_xflags;
		fa.fsx_extsize	= va.va_extsize;
		fa.fsx_nextents = va.va_anextents;

		if (copy_to_user((struct fsxattr *)arg, &fa, sizeof(fa)))
			return -XFS_ERROR(EFAULT);
		return 0;
	}

	case XFS_IOC_GETXFLAGS: {
		flags = xfs_di2lxflags(ip->i_d.di_flags);
		if (copy_to_user((unsigned int *)arg, &flags, sizeof(flags)))
			return -XFS_ERROR(EFAULT);
		return 0;
	}

	case XFS_IOC_SETXFLAGS: {
		if (copy_from_user(&flags, (unsigned int *)arg, sizeof(flags)))
			return -XFS_ERROR(EFAULT);

		if (flags & ~(LINUX_XFLAG_IMMUTABLE | LINUX_XFLAG_APPEND | \
			      LINUX_XFLAG_NOATIME | LINUX_XFLAG_NODUMP | \
			      LINUX_XFLAG_SYNC))
			return -XFS_ERROR(EOPNOTSUPP);

		attr_flags = 0;
		if (filp->f_flags & (O_NDELAY|O_NONBLOCK))
			attr_flags |= ATTR_NONBLOCK;

		va.va_mask = XFS_AT_XFLAGS;
		va.va_xflags = xfs_merge_ioc_xflags(flags,
				xfs_dic2xflags(&ip->i_d, ARCH_NOCONVERT));

		VOP_SETATTR(vp, &va, attr_flags, NULL, error);
		if (!error)
			vn_revalidate(vp);	/* update Linux inode flags */
		return -error;
	}

	case XFS_IOC_GETVERSION: {
		flags = LINVFS_GET_IP(vp)->i_generation;
		if (copy_to_user((unsigned int *)arg, &flags, sizeof(flags)))
			return -XFS_ERROR(EFAULT);
		return 0;
	}

	default:
		return -ENOTTY;
	}
}
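A minimal userland sketch of the XFS_IOC_FSGETXATTR path handled above;
the <xfs/xfs_fs.h> header name is an assumption that varies by
distribution, while the struct fsxattr fields match the ones the handler
fills in:

#include <stdio.h>
#include <fcntl.h>
#include <unistd.h>
#include <sys/ioctl.h>
#include <xfs/xfs_fs.h>	/* assumed location of XFS_IOC_FSGETXATTR */

int
print_xflags(const char *path)
{
	struct fsxattr fa;
	int fd = open(path, O_RDONLY);

	if (fd < 0)
		return -1;
	if (ioctl(fd, XFS_IOC_FSGETXATTR, &fa) < 0) {
		close(fd);
		return -1;
	}
	printf("xflags 0x%x extsize %u nextents %u\n",
	    fa.fsx_xflags, fa.fsx_extsize, fa.fsx_nextents);
	close(fd);
	return 0;
}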
Exemplo n.º 28
0
/*
 * Search an alternate path before passing pathname arguments on
 * to system calls. Useful for keeping a separate 'emulation tree'.
 *
 * If cflag is set, we check if an attempt can be made to create
 * the named file, i.e. we check if the directory it should
 * be in exists.
 */
int
emul_find(struct proc *p, caddr_t *sgp, const char *prefix,
    char *path, char **pbuf, int cflag)
{
	struct nameidata	 nd;
	struct nameidata	 ndroot;
	struct vattr		 vat;
	struct vattr		 vatroot;
	int			 error;
	char			*ptr, *buf, *cp;
	const char		*pr;
	size_t			 sz, len;

	buf = (char *) malloc(MAXPATHLEN, M_TEMP, M_WAITOK);
	*pbuf = path;

	for (ptr = buf, pr = prefix; (*ptr = *pr) != '\0'; ptr++, pr++)
		continue;

	sz = MAXPATHLEN - (ptr - buf);

	/* 
	 * If sgp is not given then the path is already in kernel space
	 */
	if (sgp == NULL)
		error = copystr(path, ptr, sz, &len);
	else
		error = copyinstr(path, ptr, sz, &len);

	if (error)
		goto bad;

	if (*ptr != '/') {
		error = EINVAL;
		goto bad;
	}

	/*
	 * We know that there is a / somewhere in this pathname.
	 * If we want to create a file (cflag is set), search backwards
	 * for the last / to find the file's parent dir and check whether
	 * it exists in the alternate tree.  In that case we don't need
	 * to worry about the root comparison below.
	 */

	if (cflag) {
		for (cp = &ptr[len] - 1; *cp != '/'; cp--)
			;
		*cp = '\0';

		NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, buf, p);

		if ((error = namei(&nd)) != 0)
			goto bad;

		*cp = '/';
	} else {
		NDINIT(&nd, LOOKUP, FOLLOW, UIO_SYSSPACE, buf, p);

		if ((error = namei(&nd)) != 0)
			goto bad;

		/*
		 * We now compare the vnode of the emulation root to the one
		 * vnode asked. If they resolve to be the same, then we
		 * ignore the match so that the real root gets used.
		 * This avoids the problem of traversing "../.." to find the
		 * root directory and never finding it, because "/" resolves
		 * to the emulation root directory. This is expensive :-(
		 */
		/* XXX: prototype should have const here for NDINIT */
		NDINIT(&ndroot, LOOKUP, FOLLOW, UIO_SYSSPACE, prefix, p);

		if ((error = namei(&ndroot)) != 0)
			goto bad2;

		if ((error = VOP_GETATTR(nd.ni_vp, &vat, p->p_ucred, p)) != 0)
			goto bad3;

		if ((error = VOP_GETATTR(ndroot.ni_vp, &vatroot, p->p_ucred, p))
		    != 0)
			goto bad3;

		if (vat.va_fsid == vatroot.va_fsid &&
		    vat.va_fileid == vatroot.va_fileid) {
			error = ENOENT;
			goto bad3;
		}
	}
	if (sgp == NULL)
		*pbuf = buf;
	else {
		sz = &ptr[len] - buf;
		*pbuf = stackgap_alloc(sgp, sz + 1);
		if (*pbuf == NULL) {
			error = ENAMETOOLONG;
			goto bad;
		}
		if ((error = copyout(buf, *pbuf, sz)) != 0) {
			*pbuf = path;
			goto bad;
		}
		free(buf, M_TEMP, 0);
	}

	vrele(nd.ni_vp);
	if (!cflag)
		vrele(ndroot.ni_vp);
	return error;

bad3:
	vrele(ndroot.ni_vp);
bad2:
	vrele(nd.ni_vp);
bad:
	free(buf, M_TEMP, 0);
	return error;
}
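A minimal caller sketch; the "/emul/linux" prefix and the wrapper itself
are illustrative assumptions, while the contract used here is the one
implemented above, where *pbuf is left pointing at the original path on
failure:

static char *
emul_resolve(struct proc *p, caddr_t *sgp, char *path)
{
	char *npath;

	/*
	 * cflag == 0: the file itself must already exist under the
	 * emulation root.  On any failure emul_find() leaves *pbuf
	 * pointing at the original path, so npath is always usable.
	 * When sgp == NULL and the lookup succeeds, the caller owns
	 * the returned kernel buffer and must free it.
	 */
	(void)emul_find(p, sgp, "/emul/linux", path, &npath, 0);
	return npath;
}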
Exemplo n.º 29
0
/*
 * This function checks the path to a new export to
 * check whether all the pathname components are
 * exported. It works by climbing the file tree one
 * component at a time via "..", crossing mountpoints
 * if necessary until an export entry is found, or the
 * system root is reached.
 *
 * If an unexported mountpoint is found, then
 * a new pseudo export is added and the pathname from
 * the mountpoint down to the export is added to the
 * visible list for the new pseudo export.  If an existing
 * pseudo export is found, then the pathname is added
 * to its visible list.
 *
 * Note that there are some tests for exportdir.
 * The exportinfo entry that's passed as a parameter
 * is that of the real export, and exportdir is set
 * for this case.
 *
 * Here is an example of a possible setup:
 *
 * () - a new fs; fs mount point
 * EXPORT - a real exported node
 * PSEUDO - a pseudo node
 * vis - visible list
 * f# - security flavor#
 * (f#) - security flavor# propagated from its descendents
 * "" - covered vnode
 *
 *
 *                 /
 *                 |
 *                 (a) PSEUDO (f1,f2)
 *                 |   vis: b,b,"c","n"
 *                 |
 *                 b
 *        ---------|------------------
 *        |                          |
 *        (c) EXPORT,f1(f2)          (n) PSEUDO (f1,f2)
 *        |   vis: "e","d"           |   vis: m,m,,p,q,"o"
 *        |                          |
 *  ------------------          -------------------
 *  |        |        |         |                  |
 *  (d)      (e)      f         m EXPORT,f1(f2)    p
 *  EXPORT   EXPORT             |                  |
 *  f1       f2                 |                  |
 *           |                  |                  |
 *           j                 (o) EXPORT,f2       q EXPORT f2
 *
 */
int
treeclimb_export(struct exportinfo *exip)
{
	vnode_t *dvp, *vp;
	fid_t fid;
	int error;
	int exportdir;
	struct exportinfo *new_exi = exip;
	struct exp_visible *visp;
	struct exp_visible *vis_head = NULL;
	struct vattr va;
	treenode_t *tree_head = NULL;
	timespec_t now;

	ASSERT(RW_WRITE_HELD(&exported_lock));

	gethrestime(&now);

	vp = exip->exi_vp;
	VN_HOLD(vp);
	exportdir = 1;

	for (;;) {

		bzero(&fid, sizeof (fid));
		fid.fid_len = MAXFIDSZ;
		error = vop_fid_pseudo(vp, &fid);
		if (error)
			break;

		/*
		 * The root of the file system needs special handling
		 */
		if (vp->v_flag & VROOT) {
			if (! exportdir) {
				struct exportinfo *exi;

				/*
				 * Check if this VROOT dir is already exported.
				 * If so, then attach the pseudonodes.  If not,
				 * then continue .. traversal until we hit a
				 * VROOT export (pseudo or real).
				 */
				exi = checkexport4(&vp->v_vfsp->vfs_fsid, &fid,
				    vp);
				if (exi != NULL) {
					/*
					 * Found an export info
					 *
					 * Extend the list of visible
					 * directories whether it's a pseudo
					 * or a real export.
					 */
					more_visible(exi, tree_head);
					break;	/* and climb no further */
				}

				/*
				 * Found the root directory of a filesystem
				 * that isn't exported.  Need to export
				 * this as a pseudo export so that an NFS v4
				 * client can do lookups in it.
				 */
				new_exi = pseudo_exportfs(vp, &fid, vis_head,
				    NULL);
				vis_head = NULL;
			}

			if (VN_CMP(vp, rootdir)) {
				/* at system root */
				/*
				 * If sharing "/", new_exi is shared exportinfo
				 * (exip). Otherwise, new_exi is exportinfo
				 * created by pseudo_exportfs() above.
				 */
				ns_root = tree_prepend_node(tree_head, NULL,
				    new_exi);

				/* Update the change timestamp */
				tree_update_change(ns_root, &now);

				break;
			}

			/*
			 * Traverse across the mountpoint and continue the
			 * climb on the mounted-on filesystem.
			 */
			vp = untraverse(vp);
			exportdir = 0;
			continue;
		}

		/*
		 * Do a getattr to obtain the nodeid (inode num)
		 * for this vnode.
		 */
		va.va_mask = AT_NODEID;
		error = VOP_GETATTR(vp, &va, 0, CRED(), NULL);
		if (error)
			break;

		/*
		 *  Add this directory fid to visible list
		 */
		visp = kmem_alloc(sizeof (*visp), KM_SLEEP);
		VN_HOLD(vp);
		visp->vis_vp = vp;
		visp->vis_fid = fid;		/* structure copy */
		visp->vis_ino = va.va_nodeid;
		visp->vis_count = 1;
		visp->vis_exported = exportdir;
		visp->vis_secinfo = NULL;
		visp->vis_seccnt = 0;
		visp->vis_change = now;		/* structure copy */
		visp->vis_next = vis_head;
		vis_head = visp;

		/*
		 * Will set treenode's pointer to exportinfo to
		 * 1. shared exportinfo (exip) - if first visit here
		 * 2. freshly allocated pseudo export (if any)
		 * 3. null otherwise
		 */
		tree_head = tree_prepend_node(tree_head, visp, new_exi);
		new_exi = NULL;

		/*
		 * Now, do a ".." to find parent dir of vp.
		 */
		error = VOP_LOOKUP(vp, "..", &dvp, NULL, 0, NULL, CRED(),
		    NULL, NULL, NULL);

		if (error == ENOTDIR && exportdir) {
			dvp = exip->exi_dvp;
			ASSERT(dvp != NULL);
			VN_HOLD(dvp);
			error = 0;
		}

		if (error)
			break;

		exportdir = 0;
		VN_RELE(vp);
		vp = dvp;
	}

	VN_RELE(vp);

	/*
	 * We can have set error due to error in:
	 * 1. vop_fid_pseudo()
	 * 2. VOP_GETATTR()
	 * 3. VOP_LOOKUP()
	 * We must free pseudo exportinfos, visibles and treenodes.
	 * Visibles are referenced from treenode_t::tree_vis and
	 * exportinfo_t::exi_visible. To avoid double freeing, only
	 * exi_visible pointer is used, via exi_rele(), for the clean-up.
	 */
	if (error) {
		/* Free unconnected visibles, if there are any. */
		if (vis_head)
			free_visible(vis_head);

		/* Connect unconnected exportinfo, if there is any. */
		if (new_exi && new_exi != exip)
			tree_head = tree_prepend_node(tree_head, NULL, new_exi);

		while (tree_head) {
			treenode_t *t2 = tree_head;
			exportinfo_t *e  = tree_head->tree_exi;
			/* exip will be freed in exportfs() */
			if (e && e != exip) {
				export_unlink(e);
				exi_rele(e);
			}
			tree_head = tree_head->tree_child_first;
			kmem_free(t2, sizeof (*t2));
		}
	}

	return (error);
}
Exemplo n.º 30
0
/*
 *	Routine:	macx_swapon
 *	Function:
 *		Syscall interface to add a file to backing store
 */
int
macx_swapon(
    char 	*filename,
    int	flags,
    long	size,
    long	priority)
{
    struct vnode		*vp = 0;
    struct nameidata 	nd, *ndp;
    struct proc		*p =  current_proc();
    pager_file_t		pf;
    register int		error;
    kern_return_t		kr;
    mach_port_t		backing_store;
    memory_object_default_t	default_pager;
    int			i;
    boolean_t		funnel_state;

    struct vattr	vattr;

    AUDIT_MACH_SYSCALL_ENTER(AUE_SWAPON);
    AUDIT_ARG(value, priority);

    funnel_state = thread_funnel_set(kernel_flock, TRUE);
    ndp = &nd;

    if ((error = suser(p->p_ucred, &p->p_acflag)))
        goto swapon_bailout;

    if(default_pager_init_flag == 0) {
        start_def_pager(NULL);
        default_pager_init_flag = 1;
    }

    /*
     * Get a vnode for the paging area.
     */
    NDINIT(ndp, LOOKUP, FOLLOW | LOCKLEAF | AUDITVNPATH1, UIO_USERSPACE,
           filename, p);

    if ((error = namei(ndp)))
        goto swapon_bailout;
    vp = ndp->ni_vp;

    if (vp->v_type != VREG) {
        error = EINVAL;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }
    UBCINFOCHECK("macx_swapon", vp);

    if (error = VOP_GETATTR(vp, &vattr, p->p_ucred, p)) {
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    if (vattr.va_size < (u_quad_t)size) {
        vattr_null(&vattr);
        vattr.va_size = (u_quad_t)size;
        error = VOP_SETATTR(vp, &vattr, p->p_ucred, p);
        if (error) {
            VOP_UNLOCK(vp, 0, p);
            goto swapon_bailout;
        }
    }

    /* add new backing store to list */
    i = 0;
    /* Stop at the first free slot, without indexing past the table. */
    while(i < MAX_BACKING_STORE && bs_port_table[i].vp != 0)
        i++;
    if(i == MAX_BACKING_STORE) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        goto swapon_bailout;
    }

    /* remember the vnode. This vnode has namei() reference */
    bs_port_table[i].vp = vp;

    /*
     * Look to see if we are already paging to this file.
     */
    /* make certain the copy send of kernel call will work */
    default_pager = MEMORY_OBJECT_DEFAULT_NULL;
    kr = host_default_memory_manager(host_priv_self(), &default_pager, 0);
    if(kr != KERN_SUCCESS) {
        error = EAGAIN;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    kr = default_pager_backing_store_create(default_pager,
                                            -1, /* default priority */
                                            0, /* default cluster size */
                                            &backing_store);
    memory_object_default_deallocate(default_pager);

    if(kr != KERN_SUCCESS) {
        error = ENOMEM;
        VOP_UNLOCK(vp, 0, p);
        bs_port_table[i].vp = 0;
        goto swapon_bailout;
    }

    /*
     * NOTE: we are able to supply PAGE_SIZE here instead of
     *	an actual record size or block number because:
     *	a: we do not support offsets from the beginning of the
     *		file (allowing for non page size/record modulo offsets);
     *	b: all paging will be done modulo page size.
     */

    VOP_UNLOCK(vp, 0, p);
    kr = default_pager_add_file(backing_store, vp, PAGE_SIZE,
                                ((int)vattr.va_size)/PAGE_SIZE);
    if(kr != KERN_SUCCESS) {
        bs_port_table[i].vp = 0;
        if(kr == KERN_INVALID_ARGUMENT)
            error = EINVAL;
        else
            error = ENOMEM;
        goto swapon_bailout;
    }
    bs_port_table[i].bs = (void *)backing_store;
    error = 0;
    if (!ubc_hold(vp))
        panic("macx_swapon: hold");

    /* Mark this vnode as being used for swapfile */
    SET(vp->v_flag, VSWAP);

    ubc_setcred(vp, p);

    /*
     * take an extra reference on the vnode to keep
     * vnreclaim() away from this vnode.
     */
    VREF(vp);

    /* Hold on to the namei reference to the paging file vnode. */
    vp = 0;

swapon_bailout:
    if (vp) {
        vrele(vp);
    }
    (void) thread_funnel_set(kernel_flock, FALSE);
    AUDIT_MACH_SYSCALL_EXIT(error);
    return(error);
}
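A minimal caller sketch; the extern prototype and the swap file path are
illustrative assumptions, and size is in bytes, as the va_size comparison
above shows:

extern int macx_swapon(char *filename, int flags, long size,
    long priority);

int
add_swapfile(void)
{
	/* Offer a 128 MB file as backing store at default priority. */
	return macx_swapon("/private/var/vm/swapfile0", 0,
	    128L * 1024 * 1024, 0);
}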