Example #1
0
/*
    struct vnop_access_args {
        struct vnode *a_vp;
#if VOP_ACCESS_TAKES_ACCMODE_T
        accmode_t a_accmode;
#else
        int a_mode;
#endif
        struct ucred *a_cred;
        struct thread *a_td;
    };
*/
static int
fuse_vnop_access(struct vop_access_args *ap)
{
	struct vnode *vp = ap->a_vp;
	int accmode = ap->a_accmode;
	struct ucred *cred = ap->a_cred;

	struct fuse_access_param facp;
	struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));

	int err;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	if (fuse_isdeadfs(vp)) {
		if (vnode_isvroot(vp)) {
			return 0;
		}
		return ENXIO;
	}
	if (!(data->dataflags & FSESS_INITED)) {
		if (vnode_isvroot(vp)) {
			if (priv_check_cred(cred, PRIV_VFS_ADMIN, 0) ||
			    (fuse_match_cred(data->daemoncred, cred) == 0)) {
				return 0;
			}
		}
		return EBADF;
	}
	if (vnode_islnk(vp)) {
		return 0;
	}
	bzero(&facp, sizeof(facp));

	err = fuse_internal_access(vp, accmode, &facp, ap->a_td, ap->a_cred);
	FS_DEBUG2G("err=%d accmode=0x%x\n", err, accmode);
	return err;
}
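Before the session is initialized (FSESS_INITED still clear), the handler above only admits the superuser or the daemon's owner, relying on fuse_match_cred() returning 0 on a match. The driver's real helper is defined elsewhere; the sketch below is only an illustration of that contract, assuming a plain comparison of the effective and real ids in the two ucreds.

/*
 * Illustrative sketch only (not the driver's fuse_match_cred): return 0
 * when the caller's identity matches the daemon's credentials, EPERM
 * otherwise, so that "== 0" means "same owner" at the call site above.
 */
static int
sketch_match_cred(struct ucred *daemoncred, struct ucred *usercred)
{
	if (daemoncred->cr_uid == usercred->cr_uid &&
	    daemoncred->cr_uid == usercred->cr_ruid &&
	    daemoncred->cr_rgid == usercred->cr_rgid)
		return (0);
	return (EPERM);
}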
Example #2
0
/*ARGSUSED*/
static int
zfs_vfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);	
	objset_t *os = zfsvfs->z_os;
	znode_t	*zp, *nextzp;
	int ret, i;
	int flags;
	
	/*XXX NOEL: delegation admin stuffs, add back if we use delg. admin */
#if 0
	ret = 0; /* UNDEFINED: secpolicy_fs_unmount(cr, vfsp); */
	if (ret) {
		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
		    ZFS_DELEG_PERM_MOUNT, cr);
		if (ret)
			return (ret);
	}

	/*
	 * We purge the parent filesystem's vfsp as the parent filesystem
	 * and all of its snapshots have their vnode's v_vfsp set to the
	 * parent's filesystem's vfsp.  Note, 'z_parent' is self
	 * referential for non-snapshots.
	 */
	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
#endif

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL &&
	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
		return (ret);
	}
#endif
	flags = SKIPSYSTEM;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	ret = vflush(mp, NULLVP, flags);

	/*
	 * Mac OS X needs a file system modify time
	 *
	 * We use the mtime of the "com.apple.system.mtime" 
	 * extended attribute, which is associated with the
	 * file system root directory.
	 *
	 * Here we need to release the ref we took on z_mtime_vp during mount.
	 */
	if ((ret == 0) || (mntflags & MNT_FORCE)) {
		if (zfsvfs->z_mtime_vp != NULL) {
			struct vnode *mvp;

			mvp = zfsvfs->z_mtime_vp;
			zfsvfs->z_mtime_vp = NULL;

			if (vnode_get(mvp) == 0) {
				vnode_rele(mvp);
				vnode_recycle(mvp);
				vnode_put(mvp);
			}
		}
	}

	if (!(mntflags & MNT_FORCE)) {
		/*
		 * Check the number of active vnodes in the file system.
		 * Our count is maintained in the vfs structure, but the
		 * number is off by 1 to indicate a hold on the vfs
		 * structure itself.
		 *
		 * The '.zfs' directory maintains a reference of its
		 * own, and any active references underneath are
		 * reflected in the vnode count.
		 */
		
		if (ret)
			return (EBUSY);
#if 0
		if (zfsvfs->z_ctldir == NULL) {
			if (vfsp->vfs_count > 1)
				return (EBUSY);
		} else {
			if (vfsp->vfs_count > 2 ||
			    zfsvfs->z_ctldir->v_count > 1) {
				return (EBUSY);
			}
		}
#endif
	}

	rw_enter(&zfsvfs->z_unmount_lock, RW_WRITER);
	rw_enter(&zfsvfs->z_unmount_inactive_lock, RW_WRITER);

	/*
	 * At this point there are no vops active, and any new vops will
	 * fail with EIO since we have z_unmount_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 * Note, the dmu can still callback via znode_pageout_func()
	 * which can zfs_znode_free() the znode.  So we lock
	 * z_all_znodes; search the list for a held dbuf; drop the lock
	 * (we know zp can't disappear if we hold a dbuf lock) then
	 * regrab the lock and restart.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
		nextzp = list_next(&zfsvfs->z_all_znodes, zp);
		if (zp->z_dbuf_held) {
			/* dbufs should only be held when force unmounting */
			zp->z_dbuf_held = 0;
			mutex_exit(&zfsvfs->z_znodes_lock);
			dmu_buf_rele(zp->z_dbuf, NULL);
			/* Start again */
			mutex_enter(&zfsvfs->z_znodes_lock);
			nextzp = list_head(&zfsvfs->z_all_znodes);
		}
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	/*
	 * Set the unmounted flag and let new vops unblock.
	 * zfs_inactive will have the unmounted behavior, and all other
	 * vops will fail with EIO.
	 */
	zfsvfs->z_unmounted = B_TRUE;
	rw_exit(&zfsvfs->z_unmount_lock);
	rw_exit(&zfsvfs->z_unmount_inactive_lock);

	/*
	 * Unregister properties.
	 */
#ifndef __APPLE__
	if (!dmu_objset_is_snapshot(os))
		zfs_unregister_callbacks(zfsvfs);
#endif
	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	}

	/*
	 * Evict all dbufs so that cached znodes will be freed
	 */
	if (dmu_objset_evict_dbufs(os, B_TRUE)) {
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
		(void) dmu_objset_evict_dbufs(os, B_FALSE);
	}

	/*
	 * Finally close the objset
	 */
	dmu_objset_close(os);

	/*
	 * We can now safely destroy the '.zfs' directory node.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL)
		zfsctl_destroy(zfsvfs);
#endif

	/*
	 * Note that this work is normally done in zfs_freevfs, but since
	 * there is no VOP_FREEVFS in OSX, we free VFS items here
	 */
	OSDecrementAtomic((SInt32 *)&zfs_active_fs_count);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);

	mutex_destroy(&zfsvfs->z_znodes_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rw_destroy(&zfsvfs->z_unmount_lock);
	rw_destroy(&zfsvfs->z_unmount_inactive_lock);

	return (0);
}
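The z_all_znodes scan above illustrates a general pattern: when an element needs work that must not be done under the list lock (here, dmu_buf_rele()), clear the element's flag, drop the lock, do the work, then re-take the lock and restart from the head because the list may have changed in between. A stand-alone userspace sketch of the same pattern, with made-up names (node, node_list, blocking_release), looks like this:

#include <pthread.h>

struct node {
	struct node *next;
	int held;			/* analogous to z_dbuf_held */
};

static pthread_mutex_t list_lock = PTHREAD_MUTEX_INITIALIZER;
static struct node *node_list;		/* head of a singly linked list */

/* Blocking teardown work; must be called without list_lock held. */
static void
blocking_release(struct node *n)
{
	(void)n;
}

static void
drain_holds(void)
{
	struct node *n, *next;

	pthread_mutex_lock(&list_lock);
	for (n = node_list; n != NULL; n = next) {
		next = n->next;
		if (n->held) {
			n->held = 0;		/* clear while still locked */
			pthread_mutex_unlock(&list_lock);
			blocking_release(n);	/* blocking work, lock dropped */
			pthread_mutex_lock(&list_lock);
			next = node_list;	/* restart: list may have changed */
		}
	}
	pthread_mutex_unlock(&list_lock);
}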


 
struct vnode* vnode_getparent(struct vnode *vp);  /* sys/vnode_internal.h */

static int
zfs_vget_internal(zfsvfs_t *zfsvfs, ino64_t ino, struct vnode **vpp)
{
	struct vnode	*vp;
	struct vnode	*dvp = NULL;
	znode_t		*zp;
	int		error;

	*vpp = NULL;
	
	/*
	 * On Mac OS X we always export the root directory id as 2
	 * and its parent as 1
	 */
	if (ino == 2 || ino == 1)
		ino = zfsvfs->z_root;
	
	if ((error = zfs_zget(zfsvfs, ino, &zp)))
		goto out;

	/* Don't expose EA objects! */
	if (zp->z_phys->zp_flags & ZFS_XATTR) {
		vnode_put(ZTOV(zp));
		error = ENOENT;
		goto out;
	}

	*vpp = vp = ZTOV(zp);

	if (vnode_isvroot(vp))
		goto out;

	/*
	 * If this znode didn't just come from the cache then
	 * it won't have a valid identity (parent and name).
	 *
	 * Manually fix its identity here (normally done by namei lookup).
	 */
	if ((dvp = vnode_getparent(vp)) == NULL) {
		if (zp->z_phys->zp_parent != 0 &&
		    zfs_vget_internal(zfsvfs, zp->z_phys->zp_parent, &dvp)) {
			goto out;
		}
		if (dvp != NULL && vnode_isdir(dvp)) {
			char objname[ZAP_MAXNAMELEN];  /* 256 bytes */
			int flags = VNODE_UPDATE_PARENT;

			/* Look for znode's name in its parent's zap */
			if ( zap_value_search(zfsvfs->z_os,
			                      zp->z_phys->zp_parent, 
			                      zp->z_id,
			                      ZFS_DIRENT_OBJ(-1ULL),
			                      objname) == 0 ) {
				flags |= VNODE_UPDATE_NAME;
			}

			/* Update the znode's parent and name */
			vnode_update_identity(vp, dvp, objname, 0, 0, flags);
		}
	}
	/* All done with znode's parent */
	if (dvp != NULL)
		vnode_put(dvp);
out:
	return (error);
}

/*
 * Get a vnode from a file id (ignoring the generation)
 *
 * Use by NFS Server (readdirplus) and VFS (build_path)
 */
static int
zfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	int error;

	ZFS_ENTER(zfsvfs);

	/*
	 * On Mac OS X we always export the root directory id as 2.
	 * So we don't expect to see the real root directory id
	 * from zfs_vfs_vget KPI (unless of course the real id was
	 * already 2).
	 */
	if ((ino == zfsvfs->z_root) && (zfsvfs->z_root != 2)) {
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}
	error = zfs_vget_internal(zfsvfs, ino, vpp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
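Both zfs_vget_internal() and zfs_vfs_vget() encode the same convention: the dataset's real root object is presented to the Mac OS X VFS as inode 2 (with parent 1), and an incoming 1 or 2 is mapped back to z_root. Summarized as two small helpers (illustrative names, not taken from the source):

/* Host-visible inode number for a ZFS object id (illustrative helper). */
static ino64_t
zfs_external_ino(zfsvfs_t *zfsvfs, uint64_t zid)
{
	return (zid == zfsvfs->z_root) ? 2 : zid;
}

/* ZFS object id for a host-supplied inode number (illustrative helper). */
static uint64_t
zfs_internal_ino(zfsvfs_t *zfsvfs, ino64_t ino)
{
	return (ino == 2 || ino == 1) ? zfsvfs->z_root : ino;
}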
Example #3
0
static int
vnop_setattr_9p(struct vnop_setattr_args *ap)
{
	struct vnode_attr *vap;
	vnode_t vp;
	node_9p *np;
	dir_9p d;
	int e;

	TRACE();
	vp = ap->a_vp;
	vap = ap->a_vap;
	np = NTO9P(vp);

	if (vnode_vfsisrdonly(vp))
		return EROFS;
	
	if (vnode_isvroot(vp))
		return EACCES;

	nulldir(&d);
	if (VATTR_IS_ACTIVE(vap, va_data_size)) {
		if (vnode_isdir(vp))
			return EISDIR;
		d.length = vap->va_data_size;
	}
	VATTR_SET_SUPPORTED(vap, va_data_size);

	if (VATTR_IS_ACTIVE(vap, va_access_time))
		d.atime = vap->va_access_time.tv_sec;
	VATTR_SET_SUPPORTED(vap, va_access_time);

	if (VATTR_IS_ACTIVE(vap, va_modify_time))
		d.mtime = vap->va_modify_time.tv_sec;
	VATTR_SET_SUPPORTED(vap, va_modify_time);

	if (VATTR_IS_ACTIVE(vap, va_mode)) {
		d.mode = vap->va_mode & 0777;
		if (vnode_isdir(vp))
			SET(d.mode, DMDIR);
		if (ISSET(np->nmp->flags, F_DOTU)) {
			switch (vnode_vtype(vp)) {
			case VBLK:
			case VCHR:
				SET(d.mode, DMDEVICE);
				break;
			case VLNK:
				SET(d.mode, DMSYMLINK);
				break;
			case VSOCK:
				SET(d.mode, DMSOCKET);
				break;
			case VFIFO:
				SET(d.mode, DMNAMEDPIPE);
				break;
			default:
				break;
			}
		}
	}
	VATTR_SET_SUPPORTED(vap, va_mode);

	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	e = wstat_9p(np->nmp, np->fid, &d);
	np->dirtimer = 0;

	if (e==0 && d.length!=~0)
		ubc_setsize(vp, d.length);

	nunlock_9p(np);
	return e;
}
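The setattr handler above relies on the 9P wstat convention: a Twstat request only changes the fields that are not set to the "don't touch" value, so nulldir() is assumed to preset every numeric field to all ones (which is also why ubc_setsize() is only called when d.length != ~0). A rough sketch of what such an initializer would look like, purely for illustration:

/* Illustration only; the real nulldir() lives in the 9p client sources. */
static void
sketch_nulldir(dir_9p *d)
{
	/* Every numeric field becomes ~0, i.e. "leave unchanged" in wstat. */
	memset(d, 0xff, sizeof(*d));
	/*
	 * A complete implementation would also point the string fields
	 * (name, uid, gid, muid) at empty strings, the protocol's way of
	 * saying "don't rename / don't change ownership".
	 */
}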
Example #4
0
static int
vnop_lookup_9p(struct vnop_lookup_args *ap)
{
	struct componentname *cnp;
	node_9p *dnp;
	vnode_t *vpp, dvp;
	fid_9p fid;
	qid_9p qid;
	int e;

	TRACE();
	dvp = ap->a_dvp;
	vpp = ap->a_vpp;
	cnp = ap->a_cnp;
	dnp = NTO9P(dvp);

	if (!vnode_isdir(dvp))
		return ENOTDIR;

	if (isdotdot(cnp) && vnode_isvroot(dvp))
		return EIO;

	if (islastcn(cnp) && !isop(cnp, LOOKUP) && vnode_vfsisrdonly(dvp))
		return EROFS;

	if (isdot(cnp)) {
		if (islastcn(cnp) && isop(cnp, RENAME))
			return EISDIR;

		if ((e=vnode_get(dvp)))
			return e;
		*vpp = dvp;
		return 0;
	}

	if (isdotdot(cnp)) {
		*vpp = vnode_getparent(dvp);
		if (*vpp == NULL)
			return ENOENT;
		return 0;
	}

	e = cache_lookup(dvp, vpp, cnp);
	if (e == -1)	/* found */
		return 0;
	if (e != 0)		/* errno */
		return e;

	/* not in cache */
	nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
	e = walk_9p(dnp->nmp, dnp->fid, cnp->cn_nameptr, cnp->cn_namelen, &fid, &qid);
	if (e) {
		if (islastcn(cnp)) {
			if (isop(cnp, CREATE) || isop(cnp, RENAME))
				e = EJUSTRETURN;
			else if (ismkentry(cnp) && dnp->dir.qid.vers!=0)
				cache_enter(dvp, NULL, cnp);
		}
		goto error;
	}

	e = nget_9p(dnp->nmp, fid, qid, dvp, vpp, cnp, ap->a_context);
	if (e || *vpp==NULL || NTO9P(*vpp)->fid!=fid) 
		clunk_9p(dnp->nmp, fid);

	if (*vpp)
		nunlock_9p(NTO9P(*vpp));

error:
	nunlock_9p(dnp);
	return e;
}
Example #5
0
/*
    struct vnop_lookup_args {
	struct vnodeop_desc *a_desc;
	struct vnode *a_dvp;
	struct vnode **a_vpp;
	struct componentname *a_cnp;
    };
*/
int
fuse_vnop_lookup(struct vop_lookup_args *ap)
{
	struct vnode *dvp = ap->a_dvp;
	struct vnode **vpp = ap->a_vpp;
	struct componentname *cnp = ap->a_cnp;
	struct thread *td = cnp->cn_thread;
	struct ucred *cred = cnp->cn_cred;

	int nameiop = cnp->cn_nameiop;
	int flags = cnp->cn_flags;
	int wantparent = flags & (LOCKPARENT | WANTPARENT);
	int islastcn = flags & ISLASTCN;
	struct mount *mp = vnode_mount(dvp);

	int err = 0;
	int lookup_err = 0;
	struct vnode *vp = NULL;

	struct fuse_dispatcher fdi;
	enum fuse_opcode op;

	uint64_t nid;
	struct fuse_access_param facp;

	FS_DEBUG2G("parent_inode=%ju - %*s\n",
	    (uintmax_t)VTOI(dvp), (int)cnp->cn_namelen, cnp->cn_nameptr);

	if (fuse_isdeadfs(dvp)) {
		*vpp = NULL;
		return ENXIO;
	}
	if (!vnode_isdir(dvp)) {
		return ENOTDIR;
	}
	if (islastcn && vfs_isrdonly(mp) && (nameiop != LOOKUP)) {
		return EROFS;
	}
	/*
         * We do the access check before doing anything else only when we
         * are at the fs root (we'd like to say "at the first component",
         * but that's not exactly the same thing).
         * See further comments at the later access checks.
         */

	bzero(&facp, sizeof(facp));
	if (vnode_isvroot(dvp)) {	/* early permission check hack */
		if ((err = fuse_internal_access(dvp, VEXEC, &facp, td, cred))) {
			return err;
		}
	}
	if (flags & ISDOTDOT) {
		nid = VTOFUD(dvp)->parent_nid;
		if (nid == 0) {
			return ENOENT;
		}
		fdisp_init(&fdi, 0);
		op = FUSE_GETATTR;
		goto calldaemon;
	} else if (cnp->cn_namelen == 1 && *(cnp->cn_nameptr) == '.') {
		nid = VTOI(dvp);
		fdisp_init(&fdi, 0);
		op = FUSE_GETATTR;
		goto calldaemon;
	} else if (fuse_lookup_cache_enable) {
		err = cache_lookup(dvp, vpp, cnp, NULL, NULL);
		switch (err) {

		case -1:		/* positive match */
			atomic_add_acq_long(&fuse_lookup_cache_hits, 1);
			return 0;

		case 0:		/* no match in cache */
			atomic_add_acq_long(&fuse_lookup_cache_misses, 1);
			break;

		case ENOENT:		/* negative match */
			/* fall through */
		default:
			return err;
		}
	}
	nid = VTOI(dvp);
	fdisp_init(&fdi, cnp->cn_namelen + 1);
	op = FUSE_LOOKUP;

calldaemon:
	fdisp_make(&fdi, op, mp, nid, td, cred);

	if (op == FUSE_LOOKUP) {
		memcpy(fdi.indata, cnp->cn_nameptr, cnp->cn_namelen);
		((char *)fdi.indata)[cnp->cn_namelen] = '\0';
	}
	lookup_err = fdisp_wait_answ(&fdi);

	if ((op == FUSE_LOOKUP) && !lookup_err) {	/* lookup call succeeded */
		nid = ((struct fuse_entry_out *)fdi.answ)->nodeid;
		if (!nid) {
			/*
	                 * zero nodeid is the same as "not found",
	                 * but it's also cacheable (which we keep
	                 * not doing as of this writing)
	                 */
			lookup_err = ENOENT;
		} else if (nid == FUSE_ROOT_ID) {
			lookup_err = EINVAL;
		}
	}
	if (lookup_err &&
	    (!fdi.answ_stat || lookup_err != ENOENT || op != FUSE_LOOKUP)) {
		fdisp_destroy(&fdi);
		return lookup_err;
	}
	/* lookup_err, if non-zero, must be ENOENT at this point */

	if (lookup_err) {

		if ((nameiop == CREATE || nameiop == RENAME) && islastcn
		     /* && directory dvp has not been removed */ ) {

			if (vfs_isrdonly(mp)) {
				err = EROFS;
				goto out;
			}
#if 0 /* THINK_ABOUT_THIS */
			if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
				goto out;
			}
#endif

			/*
	                 * Possibly record the position of a slot in the
	                 * directory large enough for the new component name.
	                 * This can be recorded in the vnode private data for
	                 * dvp. Set the SAVENAME flag to hold onto the
	                 * pathname for use later in VOP_CREATE or VOP_RENAME.
	                 */
			cnp->cn_flags |= SAVENAME;

			err = EJUSTRETURN;
			goto out;
		}
		/* Consider inserting name into cache. */

		/*
	         * No, we can't use negative caching, as the fs
	         * changes are out of our control.
	         * A false positive reveals itself as things go by,
	         * but a false negative doesn't.
	         * (And aiding the caching mechanism with extra control
	         * mechanisms comes quite close to defeating the whole
	         * purpose of caching...)
	         */
#if 0
		if ((cnp->cn_flags & MAKEENTRY) && nameiop != CREATE) {
			FS_DEBUG("inserting NULL into cache\n");
			cache_enter(dvp, NULL, cnp);
		}
#endif
		err = ENOENT;
		goto out;

	} else {

		/* !lookup_err */

		struct fuse_entry_out *feo = NULL;
		struct fuse_attr *fattr = NULL;

		if (op == FUSE_GETATTR) {
			fattr = &((struct fuse_attr_out *)fdi.answ)->attr;
		} else {
			feo = (struct fuse_entry_out *)fdi.answ;
			fattr = &(feo->attr);
		}

		/*
	         * If deleting, and at end of pathname, return parameters
	         * which can be used to remove file.  If the wantparent flag
	         * isn't set, we return only the directory, otherwise we go on
	         * and lock the inode, being careful with ".".
	         */
		if (nameiop == DELETE && islastcn) {
			/*
	                 * Check for write access on directory.
	                 */
			facp.xuid = fattr->uid;
			facp.facc_flags |= FACCESS_STICKY;
			err = fuse_internal_access(dvp, VWRITE, &facp, td, cred);
			facp.facc_flags &= ~FACCESS_XQUERIES;

			if (err) {
				goto out;
			}
			if (nid == VTOI(dvp)) {
				vref(dvp);
				*vpp = dvp;
			} else {
				err = fuse_vnode_get(dvp->v_mount, nid, dvp,
				    &vp, cnp, IFTOVT(fattr->mode));
				if (err)
					goto out;
				*vpp = vp;
			}

			/*
			 * Save the name for use in VOP_RMDIR and VOP_REMOVE
			 * later.
			 */
			cnp->cn_flags |= SAVENAME;
			goto out;

		}
		/*
	         * If rewriting (RENAME), return the inode and the
	         * information required to rewrite the present directory
	         * Must get inode of directory entry to verify it's a
	         * regular file, or empty directory.
	         */
		if (nameiop == RENAME && wantparent && islastcn) {

#if 0 /* THINK_ABOUT_THIS */
			if ((err = fuse_internal_access(dvp, VWRITE, cred, td, &facp))) {
				goto out;
			}
#endif

			/*
	                 * Check for "."
	                 */
			if (nid == VTOI(dvp)) {
				err = EISDIR;
				goto out;
			}
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    dvp,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			if (err) {
				goto out;
			}
			*vpp = vp;
			/*
	                 * Save the name for use in VOP_RENAME later.
	                 */
			cnp->cn_flags |= SAVENAME;

			goto out;
		}
		if (flags & ISDOTDOT) {
			struct mount *mp;
			int ltype;

			/*
			 * Expanded copy of vn_vget_ino() so that
			 * fuse_vnode_get() can be used.
			 */
			mp = dvp->v_mount;
			ltype = VOP_ISLOCKED(dvp);
			err = vfs_busy(mp, MBF_NOWAIT);
			if (err != 0) {
				vfs_ref(mp);
				VOP_UNLOCK(dvp, 0);
				err = vfs_busy(mp, 0);
				vn_lock(dvp, ltype | LK_RETRY);
				vfs_rel(mp);
				if (err)
					goto out;
				if ((dvp->v_iflag & VI_DOOMED) != 0) {
					err = ENOENT;
					vfs_unbusy(mp);
					goto out;
				}
			}
			VOP_UNLOCK(dvp, 0);
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    NULL,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			vfs_unbusy(mp);
			vn_lock(dvp, ltype | LK_RETRY);
			if ((dvp->v_iflag & VI_DOOMED) != 0) {
				if (err == 0)
					vput(vp);
				err = ENOENT;
			}
			if (err)
				goto out;
			*vpp = vp;
		} else if (nid == VTOI(dvp)) {
			vref(dvp);
			*vpp = dvp;
		} else {
			err = fuse_vnode_get(vnode_mount(dvp),
			    nid,
			    dvp,
			    &vp,
			    cnp,
			    IFTOVT(fattr->mode));
			if (err) {
				goto out;
			}
			fuse_vnode_setparent(vp, dvp);
			*vpp = vp;
		}

		if (op == FUSE_GETATTR) {
			cache_attrs(*vpp, (struct fuse_attr_out *)fdi.answ);
		} else {
			cache_attrs(*vpp, (struct fuse_entry_out *)fdi.answ);
		}

		/* Insert name into cache if appropriate. */

		/*
	         * Nooo, caching is evil. With caching, we can't avoid stale
	         * information taking over the playground (cached info is not
	         * just positive/negative, it does have qualitative aspects,
	         * too). And a (VOP/FUSE)_GETATTR is always thrown anyway, when
	         * walking down along cached path components, and that's not
	         * any cheaper than FUSE_LOOKUP. This might change with
	         * implementing kernel side attr caching, but... In Linux,
	         * lookup results are not cached, and the daemon is bombarded
	         * with FUSE_LOOKUPS on and on. This shows that by design, the
	         * daemon is expected to handle frequent lookup queries
	         * efficiently, do its caching in userspace, and so on.
	         *
	         * So just leave the name cache alone.
	         */

		/*
	         * Well, now I know, Linux caches lookups, but with a
	         * timeout... So it's the same thing as attribute caching:
	         * we can deal with it when we implement timeouts.
	         */
#if 0
		if (cnp->cn_flags & MAKEENTRY) {
			cache_enter(dvp, *vpp, cnp);
		}
#endif
	}
out:
	if (!lookup_err) {

		/* No lookup error; need to clean up. */

		if (err) {		/* Found inode; exit with no vnode. */
			if (op == FUSE_LOOKUP) {
				fuse_internal_forget_send(vnode_mount(dvp), td, cred,
				    nid, 1);
			}
			fdisp_destroy(&fdi);
			return err;
		} else {
#ifndef NO_EARLY_PERM_CHECK_HACK
			if (!islastcn) {
				/*
				 * We have the attributes of the next item
				 * *now*, and it's a fact, and we do not
				 * have to do extra work for it (ie, beg the
				 * daemon), and it neither depends on such
				 * accidental things like attr caching. So
				 * the big idea: check credentials *now*,
				 * not at the beginning of the next call to
				 * lookup.
				 * 
				 * The first item of the lookup chain (fs root)
				 * won't be checked then here, of course, as
				 * it's never "the next". But go and see that
				 * the root is taken care of at the very
				 * beginning of this function.
				 * 
				 * Now, given we want to do the access check
				 * this way, one might ask: so then why not
				 * do the access check just after fetching
				 * the inode and its attributes from the
				 * daemon? Why bother with producing the
				 * corresponding vnode at all if something
				 * is not OK? We know what's the deal as
				 * soon as we get those attrs... There is
				 * one bit of info though not given us by
				 * the daemon: whether his response is
				 * authoritative or not... His response should
				 * be ignored if something is mounted over
				 * the dir in question. But that can be
				 * known only by having the vnode...
				 */
				int tmpvtype = vnode_vtype(*vpp);

				bzero(&facp, sizeof(facp));
				/* the early perm check hack */
				facp.facc_flags |= FACCESS_VA_VALID;

				if ((tmpvtype != VDIR) && (tmpvtype != VLNK)) {
					err = ENOTDIR;
				}
				if (!err && !vnode_mountedhere(*vpp)) {
					err = fuse_internal_access(*vpp, VEXEC, &facp, td, cred);
				}
				if (err) {
					if (tmpvtype == VLNK)
						FS_DEBUG("weird, permission error with a symlink?\n");
					vput(*vpp);
					*vpp = NULL;
				}
			}
#endif
		}
	}
	fdisp_destroy(&fdi);

	return err;
}
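All of the daemon traffic in the lookup above goes through one dispatcher lifecycle: fdisp_init() sizes the request body, fdisp_make() fills in the header for a given opcode and node id, fdisp_wait_answ() sends the ticket and blocks for the reply, and fdisp_destroy() releases it. Stripped of the namei bookkeeping, the FUSE_LOOKUP round trip reduces to roughly the following sketch, assembled only from the calls visible above (it is not a helper that exists in the driver):

static int
sketch_lookup_roundtrip(struct mount *mp, uint64_t dir_nid,
    const char *name, size_t namelen, struct thread *td, struct ucred *cred,
    uint64_t *nid_out)
{
	struct fuse_dispatcher fdi;
	int err;

	fdisp_init(&fdi, namelen + 1);			/* room for name + NUL */
	fdisp_make(&fdi, FUSE_LOOKUP, mp, dir_nid, td, cred);
	memcpy(fdi.indata, name, namelen);
	((char *)fdi.indata)[namelen] = '\0';

	err = fdisp_wait_answ(&fdi);			/* round trip to the daemon */
	if (err == 0)
		*nid_out = ((struct fuse_entry_out *)fdi.answ)->nodeid;
	fdisp_destroy(&fdi);
	return err;
}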
Example #6
0
/*
    struct vnop_getattr_args {
	struct vnode *a_vp;
	struct vattr *a_vap;
	struct ucred *a_cred;
	struct thread *a_td;
    };
*/
static int
fuse_vnop_getattr(struct vop_getattr_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct vattr *vap = ap->a_vap;
	struct ucred *cred = ap->a_cred;
	struct thread *td = curthread;
	struct fuse_vnode_data *fvdat = VTOFUD(vp);

	int err = 0;
	int dataflags;
	struct fuse_dispatcher fdi;

	FS_DEBUG2G("inode=%ju\n", (uintmax_t)VTOI(vp));

	dataflags = fuse_get_mpdata(vnode_mount(vp))->dataflags;

	/* Note that we are not bailing out on a dead file system just yet. */

	if (!(dataflags & FSESS_INITED)) {
		if (!vnode_isvroot(vp)) {
			fdata_set_dead(fuse_get_mpdata(vnode_mount(vp)));
			err = ENOTCONN;
			debug_printf("fuse_getattr b: returning ENOTCONN\n");
			return err;
		} else {
			goto fake;
		}
	}
	fdisp_init(&fdi, 0);
	if ((err = fdisp_simple_putget_vp(&fdi, FUSE_GETATTR, vp, td, cred))) {
		if ((err == ENOTCONN) && vnode_isvroot(vp)) {
			/* see comment at similar place in fuse_statfs() */
			fdisp_destroy(&fdi);
			goto fake;
		}
		if (err == ENOENT) {
			fuse_internal_vnode_disappear(vp);
		}
		goto out;
	}
	cache_attrs(vp, (struct fuse_attr_out *)fdi.answ);
	if (vap != VTOVA(vp)) {
		memcpy(vap, VTOVA(vp), sizeof(*vap));
	}
	if (vap->va_type != vnode_vtype(vp)) {
		fuse_internal_vnode_disappear(vp);
		err = ENOENT;
		goto out;
	}
	if ((fvdat->flag & FN_SIZECHANGE) != 0)
		vap->va_size = fvdat->filesize;

	if (vnode_isreg(vp) && (fvdat->flag & FN_SIZECHANGE) == 0) {
		/*
	         * This is for those cases when the file size changed without us
	         * knowing, and we want to catch up.
	         */
		off_t new_filesize = ((struct fuse_attr_out *)
				      fdi.answ)->attr.size;

		if (fvdat->filesize != new_filesize) {
			fuse_vnode_setsize(vp, cred, new_filesize);
		}
	}
	debug_printf("fuse_getattr e: returning 0\n");

out:
	fdisp_destroy(&fdi);
	return err;

fake:
	bzero(vap, sizeof(*vap));
	vap->va_type = vnode_vtype(vp);

	return 0;
}
Example #7
0
int
afs_getattr(OSI_VC_DECL(avc), struct vattr *attrs, afs_ucred_t *acred)
#endif
{
    afs_int32 code;
    struct vrequest *treq = NULL;
    struct unixuser *au;
    int inited = 0;
    OSI_VC_CONVERT(avc);

    AFS_STATCNT(afs_getattr);
    afs_Trace2(afs_iclSetp, CM_TRACE_GETATTR, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));

    if (afs_fakestat_enable && avc->mvstat == AFS_MVSTAT_MTPT) {
	struct afs_fakestat_state fakestat;
	struct vrequest *ureq = NULL;

	code = afs_CreateReq(&ureq, acred);
	if (code) {
	    return code;
	}
	afs_InitFakeStat(&fakestat);
	code = afs_TryEvalFakeStat(&avc, &fakestat, ureq);
	if (code) {
	    afs_PutFakeStat(&fakestat);
	    afs_DestroyReq(ureq);
	    return code;
	}

	code = afs_CopyOutAttrs(avc, attrs);
	afs_PutFakeStat(&fakestat);
	afs_DestroyReq(ureq);
	return code;
    }
#if defined(AFS_SUN5_ENV)
    if (flags & ATTR_HINT) {
	code = afs_CopyOutAttrs(avc, attrs);
	return code;
    }
#endif
#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
    if (avc->f.states & CUBCinit) {
	code = afs_CopyOutAttrs(avc, attrs);
	return code;
    }
#endif

    AFS_DISCON_LOCK();

    if (afs_shuttingdown != AFS_RUNNING) {
	AFS_DISCON_UNLOCK();
	return EIO;
    }

    if (!(avc->f.states & CStatd)) {
	if (!(code = afs_CreateReq(&treq, acred))) {
	    code = afs_VerifyVCache2(avc, treq);
	    inited = 1;
	}
    } else
	code = 0;

#if defined(AFS_SUN5_ENV)
    if (code == 0)
	osi_FlushPages(avc, acred);
#endif

    if (code == 0) {
	osi_FlushText(avc);	/* only needed to flush text if text locked last time */
	code = afs_CopyOutAttrs(avc, attrs);

	if (afs_nfsexporter) {
	    if (!inited) {
		if ((code = afs_CreateReq(&treq, acred))) {
		    return code;
		}
		inited = 1;
	    }
	    if (AFS_NFSXLATORREQ(acred)) {
		if ((vType(avc) != VDIR)
		    && !afs_AccessOK(avc, PRSFS_READ, treq,
				     CHECK_MODE_BITS |
				     CMB_ALLOW_EXEC_AS_READ)) {
		    afs_DestroyReq(treq);
		    return EACCES;
		}
	    }
	    if ((au = afs_FindUser(treq->uid, -1, READ_LOCK))) {
		struct afs_exporter *exporter = au->exporter;

		if (exporter && !(afs_nfsexporter->exp_states & EXP_UNIXMODE)) {
		    unsigned int ubits;
		    /*
		     *  If the remote user wishes to enforce default Unix mode semantics,
		     *  like in the nfs exporter case, we OR in the user bits
		     *  into the group and other bits. We need to do this
		     *  because there is no RFS_ACCESS call and thus nfs
		     *  clients implement nfs_access by interpreting the 
		     *  mode bits in the traditional way, which of course
		     *  loses with afs.
		     */
		    ubits = (attrs->va_mode & 0700) >> 6;
		    attrs->va_mode = attrs->va_mode | ubits | (ubits << 3);
		    /* If it's the root of AFS, replace the inode number with the
		     * inode number of the mounted on directory; otherwise this 
		     * confuses getwd()... */
#ifdef AFS_LINUX22_ENV
		    if (avc == afs_globalVp) {
			struct inode *ip = AFSTOV(avc)->i_sb->s_root->d_inode;
			attrs->va_nodeid = ip->i_ino;	/* VTOI()? */
		    }
#else
		    if (
#if defined(AFS_DARWIN_ENV)
			vnode_isvroot(AFSTOV(avc))
#elif defined(AFS_NBSD50_ENV)
			AFSTOV(avc)->v_vflag & VV_ROOT
#else
			AFSTOV(avc)->v_flag & VROOT
#endif
			) {
			struct vnode *vp = AFSTOV(avc);

#ifdef AFS_DARWIN80_ENV
			/* XXX vp = vnode_mount(vp)->mnt_vnodecovered; */
			vp = 0;
#else
			vp = vp->v_vfsp->vfs_vnodecovered;
			if (vp) {	/* Ignore weird failures */
#ifdef AFS_SGI62_ENV
			    attrs->va_nodeid = VnodeToIno(vp);
#else
			    struct inode *ip;

			    ip = (struct inode *)VTOI(vp);
			    if (ip)	/* Ignore weird failures */
				attrs->va_nodeid = ip->i_number;
#endif
			}
#endif
		    }
#endif /* AFS_LINUX22_ENV */
		}
		afs_PutUser(au, READ_LOCK);
	    }
	}
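When the request comes in via the NFS translator and EXP_UNIXMODE is not set on the exporter, the code above ORs the owner bits of va_mode into the group and other positions, so that NFS clients, which can only interpret the traditional mode bits, do not spuriously deny access. A worked example of that computation as a stand-alone illustrative helper:

/* Same computation as in the exporter branch above. */
static unsigned int
widen_owner_bits(unsigned int mode)
{
	unsigned int ubits = (mode & 0700) >> 6;

	return mode | ubits | (ubits << 3);
}
/* widen_owner_bits(0700) == 0777; widen_owner_bits(0640) == 0666. */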
Example #8
0
static int
devfs_getattr(struct vnop_getattr_args *ap)
        /*struct vnop_getattr_args {
                struct vnode *a_vp;
                struct vnode_attr *a_vap;
                kauth_cred_t a_cred;
                struct proc *a_p;
        } */ 
{
	struct vnode *vp = ap->a_vp;
	struct vnode_attr *vap = ap->a_vap;
	devnode_t *	file_node;
	struct timeval now;


	DEVFS_LOCK();
	file_node = VTODN(vp);

	microtime(&now);
	dn_times(file_node, &now, &now, &now);

	VATTR_RETURN(vap, va_mode, file_node->dn_mode);

	/*
	 * Note: for DEV_CDEV and DEV_BDEV, we return the device from
	 * the vp, not the file_node; if we are getting information on a
	 * cloning device, we want the cloned information, not the template.
	 */
	switch (file_node->dn_type)
	{
	case 	DEV_DIR:
#if FDESC
	case	DEV_DEVFD:	/* Like a directory */
#endif /* FDESC */
		VATTR_RETURN(vap, va_rdev,  0);
		vap->va_mode |= (S_IFDIR);
		break;
	case	DEV_CDEV:
		VATTR_RETURN(vap, va_rdev, vp->v_rdev);
		vap->va_mode |= (S_IFCHR);
		break;
	case	DEV_BDEV:
		VATTR_RETURN(vap, va_rdev, vp->v_rdev);
		vap->va_mode |= (S_IFBLK);
		break;
	case	DEV_SLNK:
		VATTR_RETURN(vap, va_rdev, 0);
		vap->va_mode |= (S_IFLNK);
		break;
	default:
		VATTR_RETURN(vap, va_rdev, 0);	/* default value only */
	}
	VATTR_RETURN(vap, va_type, vp->v_type);
	VATTR_RETURN(vap, va_nlink, file_node->dn_links);
	VATTR_RETURN(vap, va_uid, file_node->dn_uid);
	VATTR_RETURN(vap, va_gid, file_node->dn_gid);
	VATTR_RETURN(vap, va_fsid, (uintptr_t)file_node->dn_dvm);
	VATTR_RETURN(vap, va_fileid, (uintptr_t)file_node->dn_ino);
	VATTR_RETURN(vap, va_data_size, file_node->dn_len);

	/* return an override block size (advisory) */
	if (vp->v_type == VBLK)
		VATTR_RETURN(vap, va_iosize, BLKDEV_IOSIZE);
	else if (vp->v_type == VCHR)
		VATTR_RETURN(vap, va_iosize, MAXPHYSIO);
	else
		VATTR_RETURN(vap, va_iosize, vp->v_mount->mnt_vfsstat.f_iosize);
	/* if the time is bogus, set it to the boot time */
	if (file_node->dn_ctime.tv_sec == 0) {
		file_node->dn_ctime.tv_sec = boottime_sec();
		file_node->dn_ctime.tv_nsec = 0;
	}
	if (file_node->dn_mtime.tv_sec == 0)
	    file_node->dn_mtime = file_node->dn_ctime;
	if (file_node->dn_atime.tv_sec == 0)
	    file_node->dn_atime = file_node->dn_ctime;
	VATTR_RETURN(vap, va_change_time, file_node->dn_ctime);
	VATTR_RETURN(vap, va_modify_time, file_node->dn_mtime);
	VATTR_RETURN(vap, va_access_time, file_node->dn_atime);
	VATTR_RETURN(vap, va_gen, 0);
	VATTR_RETURN(vap, va_filerev, 0);
	VATTR_RETURN(vap, va_acl, NULL);

	/* Hide the root so Finder doesn't display it */
	if (vnode_isvroot(vp)) {
		VATTR_RETURN(vap, va_flags, UF_HIDDEN);
	} else {
		VATTR_RETURN(vap, va_flags, 0);
	}

	DEVFS_UNLOCK();

	return 0;
}
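devfs answers every attribute unconditionally with VATTR_RETURN(); the 9p and ZFS handlers in this collection first test VATTR_IS_ACTIVE(). The two styles meet in the middle because VATTR_RETURN(vap, f, v) is simply "store v in f, then VATTR_SET_SUPPORTED(vap, f)". A minimal sketch of the conditional form, using va_nlink as the sample attribute, as it would sit inside a handler like the one above:

	/*
	 * Only answer when the caller actually asked for the attribute;
	 * equivalent to VATTR_RETURN(vap, va_nlink, file_node->dn_links)
	 * guarded by an activity check.
	 */
	if (VATTR_IS_ACTIVE(vap, va_nlink)) {
		vap->va_nlink = file_node->dn_links;
		VATTR_SET_SUPPORTED(vap, va_nlink);
	}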
Example #9
0
int
zfs_getattr_znode_unlocked(struct vnode *vp, vattr_t *vap)
{
	znode_t *zp = VTOZ(vp);
	zfsvfs_t *zfsvfs = zp->z_zfsvfs;
	int error = 0;
	uint64_t	parent;

    //printf("getattr_osx\n");

	ZFS_ENTER(zfsvfs);
	/*
	 * On Mac OS X we always export the root directory id as 2
	 */
	vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_id;
	//vap->va_fileid = (zp->z_id == zfsvfs->z_root) ? 2 : zp->z_vid;
	vap->va_nlink = zp->z_links;
	vap->va_data_size = zp->z_size;
	vap->va_total_size = zp->z_size;
	vap->va_gen = zp->z_gen;

	/*
	 * For Carbon compatibility, pretend to support this legacy/unused attribute
	 */
	if (VATTR_IS_ACTIVE(vap, va_backup_time)) {
		vap->va_backup_time.tv_sec = 0;
		vap->va_backup_time.tv_nsec = 0;
		VATTR_SET_SUPPORTED(vap, va_backup_time);
    }
	vap->va_flags = zfs_getbsdflags(zp);
	/*
	 * On Mac OS X we always export the root directory id as 2
     * and its parent as 1
	 */
	error = sa_lookup(zp->z_sa_hdl, SA_ZPL_PARENT(zfsvfs),
                      &parent, sizeof (parent));

    if (!error) {
        if (zp->z_id == zfsvfs->z_root)
            vap->va_parentid = 1;
        else if (parent == zfsvfs->z_root)
            vap->va_parentid = 2;
        else
            vap->va_parentid = parent;
    }

	vap->va_iosize = zp->z_blksz ? zp->z_blksz : zfsvfs->z_max_blksz;
	//vap->va_iosize = 512;
    VATTR_SET_SUPPORTED(vap, va_iosize);

	/* Don't include '.' and '..' in the number of entries */
	if (VATTR_IS_ACTIVE(vap, va_nchildren) && vnode_isdir(vp)) {
		VATTR_RETURN(vap, va_nchildren, vap->va_nlink - 2);
    }

	/*
	 * va_dirlinkcount is the count of directory hard links. When a file
	 * system does not support ATTR_DIR_LINKCOUNT, xnu will default to 1.
	 * Since we claim to support ATTR_DIR_LINKCOUNT both as valid and as
	 * native, we'll just return 1. We set 1 for this value in dirattrpack
	 * as well. If in the future ZFS actually supports directory hard links,
	 * we can return a real value.
	 */
	if (VATTR_IS_ACTIVE(vap, va_dirlinkcount) && vnode_isdir(vp)) {
		VATTR_RETURN(vap, va_dirlinkcount, 1);
    }

	if (VATTR_IS_ACTIVE(vap, va_acl)) {
        //printf("want acl\n");
#if 0
        zfs_acl_phys_t acl;

        if (sa_lookup(zp->z_sa_hdl, SA_ZPL_ZNODE_ACL(zfsvfs),
                      &acl, sizeof (zfs_acl_phys_t))) {
            //if (zp->z_acl.z_acl_count == 0) {
			vap->va_acl = (kauth_acl_t) KAUTH_FILESEC_NONE;
		} else {
			if ((error = zfs_getacl(zp, &vap->va_acl, B_TRUE, NULL))) {
                dprintf("zfs_getacl returned error %d\n", error);
                error = 0;
				//ZFS_EXIT(zfsvfs);
				//return (error);
			}
		}

#endif
      //VATTR_SET_SUPPORTED(vap, va_acl);
        VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
        VATTR_RETURN(vap, va_guuid, kauth_null_guid);

        dprintf("Calling getacl\n");
        if ((error = zfs_getacl(zp, &vap->va_acl, B_FALSE, NULL))) {
            dprintf("zfs_getacl returned error %d\n", error);
            error = 0;
        } else {

            VATTR_SET_SUPPORTED(vap, va_acl);
            /* va_acl implies that va_uuuid and va_guuid are also supported. */
            VATTR_RETURN(vap, va_uuuid, kauth_null_guid);
            VATTR_RETURN(vap, va_guuid, kauth_null_guid);
        }

    }

	if (VATTR_IS_ACTIVE(vap, va_data_alloc) || VATTR_IS_ACTIVE(vap, va_total_alloc)) {
		uint32_t  blksize;
		u_longlong_t  nblks;
        sa_object_size(zp->z_sa_hdl, &blksize, &nblks);
		vap->va_data_alloc = (uint64_t)512LL * (uint64_t)nblks;
		vap->va_total_alloc = vap->va_data_alloc;
		vap->va_supported |= VNODE_ATTR_va_data_alloc |
            VNODE_ATTR_va_total_alloc;
	}

	if (VATTR_IS_ACTIVE(vap, va_name)) {
        vap->va_name[0] = 0;

        if (!vnode_isvroot(vp)) {
            /* Let's not supply the name, as zap_cursor can cause a panic */
#if 0
            if (zap_value_search(zfsvfs->z_os, parent, zp->z_id,
                                 ZFS_DIRENT_OBJ(-1ULL), vap->va_name) == 0)
                VATTR_SET_SUPPORTED(vap, va_name);
#endif
        } else {
            /*
             * The vroot objects must return a unique name for Finder to
             * be able to distinguish between mounts. For this reason
             * we simply return the full name from the statfs f_mntfromname.
             */
            strlcpy(vap->va_name,
                    vfs_statfs(vnode_mount(vp))->f_mntfromname,
                    MAXPATHLEN);
            VATTR_SET_SUPPORTED(vap, va_name);
        }
	}

	if (VATTR_IS_ACTIVE(vap, va_filerev)) {
        VATTR_RETURN(vap, va_filerev, 0);
    }
	if (VATTR_IS_ACTIVE(vap, va_linkid)) {
        VATTR_RETURN(vap, va_linkid, vap->va_fileid);
    }
	if (VATTR_IS_ACTIVE(vap, va_fsid)) {
        VATTR_RETURN(vap, va_fsid, vfs_statfs(zfsvfs->z_vfs)->f_fsid.val[0]);
    }
	if (VATTR_IS_ACTIVE(vap, va_type)) {
        VATTR_RETURN(vap, va_type, vnode_vtype(ZTOV(zp)));
    }
	if (VATTR_IS_ACTIVE(vap, va_encoding)) {
        VATTR_RETURN(vap, va_encoding, kTextEncodingMacUnicode);
    }
#ifdef VNODE_ATTR_va_addedtime
	if (VATTR_IS_ACTIVE(vap, va_addedtime)) {
        VATTR_RETURN(vap, va_addedtime, vap->va_ctime);
    }
#endif
	if (VATTR_IS_ACTIVE(vap, va_uuuid)) {
        kauth_cred_uid2guid(zp->z_uid, &vap->va_uuuid);
    }
	if (VATTR_IS_ACTIVE(vap, va_guuid)) {
        kauth_cred_uid2guid(zp->z_gid, &vap->va_guuid);
    }

	vap->va_supported |= ZFS_SUPPORTED_VATTRS;

	ZFS_EXIT(zfsvfs);
	return (error);
}
Example #10
0
int
fuse_internal_access(struct vnode *vp,
    mode_t mode,
    struct fuse_access_param *facp,
    struct thread *td,
    struct ucred *cred)
{
	int err = 0;
	uint32_t mask = 0;
	int dataflags;
	int vtype;
	struct mount *mp;
	struct fuse_dispatcher fdi;
	struct fuse_access_in *fai;
	struct fuse_data *data;

	/* NOT YET DONE */
	/*
	 * If this vnop gives you trouble, just return 0 here for a lazy
	 * kludge.
	 */
	/* return 0;*/

	fuse_trace_printf_func();

	mp = vnode_mount(vp);
	vtype = vnode_vtype(vp);

	data = fuse_get_mpdata(mp);
	dataflags = data->dataflags;

	if ((mode & VWRITE) && vfs_isrdonly(mp)) {
		return EACCES;
	}
	/* Unless explicitly permitted, deny everyone except the fs owner. */
	if (vnode_isvroot(vp) && !(facp->facc_flags & FACCESS_NOCHECKSPY)) {
		if (!(dataflags & FSESS_DAEMON_CAN_SPY)) {
			int denied = fuse_match_cred(data->daemoncred,
			    cred);

			if (denied) {
				return EPERM;
			}
		}
		facp->facc_flags |= FACCESS_NOCHECKSPY;
	}
	if (!(facp->facc_flags & FACCESS_DO_ACCESS)) {
		return 0;
	}
	if (((vtype == VREG) && (mode & VEXEC))) {
#ifdef NEED_MOUNT_ARGUMENT_FOR_THIS
		/* Let the kernel handle this through open / close heuristics. */
		return ENOTSUP;
#else
		/* Let the kernel handle this. */
		return 0;
#endif
	}
	if (!fsess_isimpl(mp, FUSE_ACCESS)) {
		/* Let the kernel handle this. */
		return 0;
	}
	if (dataflags & FSESS_DEFAULT_PERMISSIONS) {
		/* Let the kernel handle this. */
		return 0;
	}
	if ((mode & VADMIN) != 0) {
		err = priv_check_cred(cred, PRIV_VFS_ADMIN, 0);
		if (err) {
			return err;
		}
	}
	if ((mode & (VWRITE | VAPPEND | VADMIN)) != 0) {
		mask |= W_OK;
	}
	if ((mode & VREAD) != 0) {
		mask |= R_OK;
	}
	if ((mode & VEXEC) != 0) {
		mask |= X_OK;
	}
	bzero(&fdi, sizeof(fdi));

	fdisp_init(&fdi, sizeof(*fai));
	fdisp_make_vp(&fdi, FUSE_ACCESS, vp, td, cred);

	fai = fdi.indata;
	fai->mask = F_OK;
	fai->mask |= mask;

	err = fdisp_wait_answ(&fdi);
	fdisp_destroy(&fdi);

	if (err == ENOSYS) {
		fsess_set_notimpl(mp, FUSE_ACCESS);
		err = 0;
	}
	return err;
}
Example #11
0
__private_extern__
int
fuse_internal_access(vnode_t                   vp,
                     int                       action,
                     vfs_context_t             context,
                     struct fuse_access_param *facp)
{
    int err = 0;
    int default_error = 0;
    uint32_t mask = 0;
    int dataflags;
    mount_t mp;
    struct fuse_dispatcher fdi;
    struct fuse_access_in *fai;
    struct fuse_data      *data;

    fuse_trace_printf_func();

    mp = vnode_mount(vp);

    data = fuse_get_mpdata(mp);
    dataflags = data->dataflags;

    /* Allow for now; let checks be handled inline later. */
    if (fuse_isdeferpermissions_mp(mp)) {
        return 0;
    }

    if (facp->facc_flags & FACCESS_FROM_VNOP) {
        default_error = ENOTSUP;
    }

    /*
     * (action & KAUTH_VNODE_GENERIC_WRITE_BITS) on a read-only file system
     * would have been handled by higher layers.
     */

    if (!fuse_implemented(data, FSESS_NOIMPLBIT(ACCESS))) {
        return default_error;
    }

    /* Unless explicitly permitted, deny everyone except the fs owner. */
    if (!vnode_isvroot(vp) && !(facp->facc_flags & FACCESS_NOCHECKSPY)) {
        if (!(dataflags & FSESS_ALLOW_OTHER)) {
            int denied = fuse_match_cred(data->daemoncred,
                                         vfs_context_ucred(context));
            if (denied) {
                return EPERM;
            }
        }
        facp->facc_flags |= FACCESS_NOCHECKSPY;
    }

    if (!(facp->facc_flags & FACCESS_DO_ACCESS)) {
        return default_error;
    }

    if (vnode_isdir(vp)) {
        if (action & (KAUTH_VNODE_LIST_DIRECTORY   |
                      KAUTH_VNODE_READ_EXTATTRIBUTES)) {
            mask |= R_OK;
        }
        if (action & (KAUTH_VNODE_ADD_FILE         |
                      KAUTH_VNODE_ADD_SUBDIRECTORY |
                      KAUTH_VNODE_DELETE_CHILD)) {
            mask |= W_OK;
        }
        if (action & KAUTH_VNODE_SEARCH) {
            mask |= X_OK;
        }
    } else {
        if (action & (KAUTH_VNODE_READ_DATA | KAUTH_VNODE_READ_EXTATTRIBUTES)) {
            mask |= R_OK;
        }
        if (action & (KAUTH_VNODE_WRITE_DATA | KAUTH_VNODE_APPEND_DATA)) {
            mask |= W_OK;
        }
        if (action & KAUTH_VNODE_EXECUTE) {
            mask |= X_OK;
        }
    }

    if (action & (KAUTH_VNODE_WRITE_ATTRIBUTES    |
                  KAUTH_VNODE_WRITE_EXTATTRIBUTES |
                  KAUTH_VNODE_WRITE_SECURITY)) {
        mask |= W_OK;
    }

    bzero(&fdi, sizeof(fdi));

    fdisp_init(&fdi, sizeof(*fai));
    fdisp_make_vp(&fdi, FUSE_ACCESS, vp, context);

    fai = fdi.indata;
    fai->mask = F_OK;
    fai->mask |= mask;

    if (!(err = fdisp_wait_answ(&fdi))) {
        fuse_ticket_drop(fdi.tick);
    }

    if (err == ENOSYS) {
        /*
         * Make sure we don't come in here again.
         */
        vfs_clearauthopaque(mp);
        fuse_clear_implemented(data, FSESS_NOIMPLBIT(ACCESS));
        err = default_error;
    }

    if (err == ENOENT) {

        const char *vname = NULL;

#if M_MACFUSE_ENABLE_UNSUPPORTED
        vname = vnode_getname(vp);
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

        IOLog("MacFUSE: disappearing vnode %p (name=%s type=%d action=%x)\n",
              vp, (vname) ? vname : "?", vnode_vtype(vp), action);

#if M_MACFUSE_ENABLE_UNSUPPORTED
        if (vname) {
            vnode_putname(vname);
        }
#endif /* M_MACFUSE_ENABLE_UNSUPPORTED */

        /*
         * On 10.4, I think I can get Finder to lock because of /.Trashes/<uid>
         * unless I use REVOKE_NONE here.
         */
         
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
        fuse_biglock_unlock(data->biglock);
#endif
        fuse_internal_vnode_disappear(vp, context, REVOKE_SOFT);
#if M_MACFUSE_ENABLE_INTERIM_FSNODE_LOCK && !M_MACFUSE_ENABLE_HUGE_LOCK
        fuse_biglock_lock(data->biglock);
#endif
    }

    return err;
}
Example #12
0
/* getattr sidekicks */
__private_extern__
int
fuse_internal_loadxtimes(vnode_t vp, struct vnode_attr *out_vap,
                         vfs_context_t context)
{
    struct vnode_attr *in_vap = VTOVA(vp);
    struct fuse_data *data = fuse_get_mpdata(vnode_mount(vp));
    struct fuse_dispatcher fdi;
    struct fuse_getxtimes_out *fgxo = NULL;
    int isvroot = vnode_isvroot(vp);
    struct timespec t = { 0, 0 };
    const struct timespec kZeroTime = { 0, 0 };
    int err = 0;

    if (!(data->dataflags & FSESS_XTIMES)) {
        /* We don't return anything. */
        goto out;
    }

    if (VTOFUD(vp)->c_flag & C_XTIMES_VALID) {
        VATTR_RETURN(out_vap, va_backup_time, in_vap->va_backup_time);
        VATTR_RETURN(out_vap, va_create_time, in_vap->va_create_time);
        goto out;
    }

    if (!fuse_implemented(data, FSESS_NOIMPLBIT(GETXTIMES))) {
        goto fake;
    }

    if (fuse_isdeadfs(vp) && isvroot) {
        goto fake;
    }

    if (!(data->dataflags & FSESS_INITED) && isvroot) {
        goto fake;
    }

    err = fdisp_simple_putget_vp(&fdi, FUSE_GETXTIMES, vp, context);
    if (err) {
        /* We don't ever treat this as a hard error. */
        err = 0;
        goto fake;
    }

    fgxo = (struct fuse_getxtimes_out *)fdi.answ;

    t.tv_sec = (time_t)fgxo->bkuptime; /* XXX: truncation */
    t.tv_nsec = fgxo->bkuptimensec;
    VATTR_RETURN(in_vap, va_backup_time, t);
    VATTR_RETURN(out_vap, va_backup_time, t);

    t.tv_sec = (time_t)fgxo->crtime; /* XXX: truncation */
    t.tv_nsec = fgxo->crtimensec;
    VATTR_RETURN(in_vap, va_create_time, t);
    VATTR_RETURN(out_vap, va_create_time, t);

    fuse_ticket_drop(fdi.tick);

    VTOFUD(vp)->c_flag |= C_XTIMES_VALID;

    goto out;

fake:
    VATTR_RETURN(out_vap, va_backup_time, kZeroTime);
    VATTR_RETURN(out_vap, va_create_time, kZeroTime);

out:
    return err;
}