static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 * 	- update the tail of the mntpoint path
	 *	- update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath);

	vfs_unlock(vfsp);
}
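
The function above follows the refstr_t discipline used throughout these
examples: take a reference with vfs_getmntpoint() or vfs_getresource(), read
the string with refstr_value(), and drop it with refstr_rele().  A minimal
sketch of that idiom, using a hypothetical print_mntpoint() helper (not part
of the code above):

static void
print_mntpoint(vfs_t *vfsp)
{
	refstr_t *mntpt;

	mntpt = vfs_getmntpoint(vfsp);		/* takes a hold on the refstr */
	cmn_err(CE_NOTE, "mounted at %s", refstr_value(mntpt));
	refstr_rele(mntpt);			/* drop the hold when done */
}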
Example #2
/*
 * Get the label, if any, of a zfs filesystem.  Get the dataset, then
 * get its mlslabel property, convert as needed, and return it.  If
 * there's no mlslabel or it is the default one, return NULL.
 */
static ts_label_t *
getflabel_zfs(vfs_t *vfsp)
{
	int		error;
	ts_label_t	*tsl = NULL;
	refstr_t	*resource_ref;
	bslabel_t	ds_sl;
	char		ds_hexsl[MAXNAMELEN];
	const char	*osname;

	resource_ref = vfs_getresource(vfsp);
	osname = refstr_value(resource_ref);

	error = dsl_prop_get(osname, zfs_prop_to_name(ZFS_PROP_MLSLABEL),
	    1, sizeof (ds_hexsl), &ds_hexsl, NULL);
	refstr_rele(resource_ref);

	if ((error) || (strcasecmp(ds_hexsl, ZFS_MLSLABEL_DEFAULT) == 0))
		return (NULL);
	if (hexstr_to_label(ds_hexsl, &ds_sl) != 0)
		return (NULL);

	tsl = labelalloc(&ds_sl, default_doi, KM_SLEEP);
	return (tsl);
}
Example #3
/*
 * There's no protocol today to obtain the label from the server.
 * So we rely on conventions: zones, zone names, and zone paths
 * must match across TX servers and their TX clients.  Now use
 * the exported name to find the equivalent local zone and its
 * label.  Caller is responsible for doing a label_rele of the
 * returned ts_label.
 */
ts_label_t *
getflabel_cipso(vfs_t *vfsp)
{
	zone_t	*reszone;
	zone_t	*new_reszone;
	char	*nfspath, *respath;
	refstr_t	*resource_ref;
	boolean_t	treat_abs = B_FALSE;

	if (vfsp->vfs_resource == NULL)
		return (NULL);			/* error */
	resource_ref = vfs_getresource(vfsp);

	nfspath = (char *)refstr_value(resource_ref);
	respath = strchr(nfspath, ':');		/* skip server name */
	if (respath)
		respath++;			/* skip over ":" */
	if (*respath != '/') {
		/* treat path as absolute but it doesn't have leading '/' */
		treat_abs = B_TRUE;
	}

	reszone = zone_find_by_any_path(respath, treat_abs);
	if (reszone == global_zone) {
		refstr_rele(resource_ref);
		label_hold(l_admin_low);
		zone_rele(reszone);
		return (l_admin_low);
	}

	/*
	 * Skip over zonepath (not including "root"), e.g. /zone/internal
	 */
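	/* 7 == strlen("/root/") + 1 for the NUL counted in zone_rootpathlen */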
	respath += reszone->zone_rootpathlen - 7;
	if (treat_abs)
		respath--;			/* no leading '/' to skip */
	if (strncmp(respath, "/root/", 6) == 0) {
		/* Check if we now have something like "/zone/public/" */

		respath += 5;			/* skip "/root" first */
		new_reszone = zone_find_by_any_path(respath, B_FALSE);
		if (new_reszone != global_zone) {
			zone_rele(reszone);
			reszone = new_reszone;
		} else {
			zone_rele(new_reszone);
		}
	}

	refstr_rele(resource_ref);
	label_hold(reszone->zone_slabel);
	zone_rele(reszone);

	return (reszone->zone_slabel);
}
Example #4
/*ARGSUSED*/
static int
lo_mount(struct vfs *vfsp,
         struct vnode *vp,
         struct mounta *uap,
         struct cred *cr)
{
    int error;
    struct vnode *srootvp = NULL;	/* the server's root */
    struct vnode *realrootvp;
    struct loinfo *li;
    int nodev;

    nodev = vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL);

    if ((error = secpolicy_fs_mount(cr, vp, vfsp)) != 0)
        return (EPERM);

    /*
     * Loopback devices which get "nodevices" added can be done without
     * "nodevices" set because we cannot import devices into a zone
     * with loopback.  Note that we have all zone privileges when
     * this happens; if not, we'd have gotten "nosuid".
     */
    if (!nodev && vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
        vfs_setmntopt(vfsp, MNTOPT_DEVICES, NULL, VFS_NODISPLAY);

    mutex_enter(&vp->v_lock);
    if (!(uap->flags & MS_OVERLAY) &&
            (vp->v_count != 1 || (vp->v_flag & VROOT))) {
        mutex_exit(&vp->v_lock);
        return (EBUSY);
    }
    mutex_exit(&vp->v_lock);

    /*
     * Find real root, and make vfs point to real vfs
     */

    if (error = lookupname(uap->spec, (uap->flags & MS_SYSSPACE) ?
                           UIO_SYSSPACE : UIO_USERSPACE, FOLLOW, NULLVPP, &realrootvp))
        return (error);

    /*
     * Enforce MAC policy if needed.
     *
     * Loopback mounts must not allow writing up. The dominance test
     * is intended to prevent a global zone caller from accidentally
     * creating write-up conditions between two labeled zones.
     * Local zones can't violate MAC on their own without help from
     * the global zone because they can't name a pathname that
     * they don't already have.
     *
     * The special case check for the NET_MAC_AWARE process flag is
     * to support the case of the automounter in the global zone. We
     * permit automounting of local zone directories such as home
     * directories, into the global zone as required by setlabel,
     * zonecopy, and saving of desktop sessions. Such mounts are
     * trusted not to expose the contents of one zone's directories
     * to another by leaking them through the global zone.
     */
    if (is_system_labeled() && crgetzoneid(cr) == GLOBAL_ZONEID) {
        char	specname[MAXPATHLEN];
        zone_t	*from_zptr;
        zone_t	*to_zptr;

        if (vnodetopath(NULL, realrootvp, specname,
                        sizeof (specname), CRED()) != 0) {
            VN_RELE(realrootvp);
            return (EACCES);
        }

        from_zptr = zone_find_by_path(specname);
        to_zptr = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));

        /*
         * Special case for zone devfs: the zone for /dev will
         * incorrectly appear as the global zone since it's not
         * under the zone rootpath.  So for zone devfs check allow
         * read-write mounts.
         *
         * Second special case for scratch zones used for Live Upgrade:
         * this is used to mount the zone's root from /root to /a in
         * the scratch zone.  As with the other special case, this
         * appears to be outside of the zone because it's not under
         * the zone rootpath, which is $ZONEPATH/lu in the scratch
         * zone case.
         */

        if (from_zptr != to_zptr &&
                !(to_zptr->zone_flags & ZF_IS_SCRATCH)) {
            /*
             * We know at this point that the labels aren't equal
             * because the zone pointers aren't equal, and zones
             * can't share a label.
             *
             * If the source is the global zone then making
             * it available to a local zone must be done in
             * read-only mode as the label will become admin_low.
             *
             * If it is a mount between local zones then if
             * the current process is in the global zone and has
             * the NET_MAC_AWARE flag, then regular read-write
             * access is allowed.  If it's in some other zone, but
             * the label on the mount point dominates the original
             * source, then allow the mount as read-only
             * ("read-down").
             */
            if (from_zptr->zone_id == GLOBAL_ZONEID) {
                /* make the mount read-only */
                vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
            } else { /* cross-zone mount */
                if (to_zptr->zone_id == GLOBAL_ZONEID &&
                        /* LINTED: no consequent */
                        getpflags(NET_MAC_AWARE, cr) != 0) {
                    /* Allow the mount as read-write */
                } else if (bldominates(
                               label2bslabel(to_zptr->zone_slabel),
                               label2bslabel(from_zptr->zone_slabel))) {
                    /* make the mount read-only */
                    vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
                } else {
                    VN_RELE(realrootvp);
                    zone_rele(to_zptr);
                    zone_rele(from_zptr);
                    return (EACCES);
                }
            }
        }
        zone_rele(to_zptr);
        zone_rele(from_zptr);
    }

    /*
     * realrootvp may be an AUTOFS node, in which case we
     * perform a VOP_ACCESS() to trigger the mount of the
     * intended filesystem, so we loopback mount the intended
     * filesystem instead of the AUTOFS filesystem.
     */
    (void) VOP_ACCESS(realrootvp, 0, 0, cr, NULL);

    /*
     * We're interested in the top most filesystem.
     * This is specially important when uap->spec is a trigger
     * AUTOFS node, since we're really interested in mounting the
     * filesystem AUTOFS mounted as result of the VOP_ACCESS()
     * call not the AUTOFS node itself.
     */
    if (vn_mountedvfs(realrootvp) != NULL) {
        if (error = traverse(&realrootvp)) {
            VN_RELE(realrootvp);
            return (error);
        }
    }

    /*
     * Allocate a vfs info struct and attach it
     */
    li = kmem_zalloc(sizeof (struct loinfo), KM_SLEEP);
    li->li_realvfs = realrootvp->v_vfsp;
    li->li_mountvfs = vfsp;

    /*
     * Set mount flags to be inherited by loopback vfs's
     */
    if (vfs_optionisset(vfsp, MNTOPT_RO, NULL)) {
        li->li_mflag |= VFS_RDONLY;
    }
    if (vfs_optionisset(vfsp, MNTOPT_NOSUID, NULL)) {
        li->li_mflag |= (VFS_NOSETUID|VFS_NODEVICES);
    }
    if (vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL)) {
        li->li_mflag |= VFS_NODEVICES;
    }
    if (vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL)) {
        li->li_mflag |= VFS_NOSETUID;
    }
    /*
     * Permissive flags are added to the "deny" bitmap.
     */
    if (vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL)) {
        li->li_dflag |= VFS_XATTR;
    }
    if (vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL)) {
        li->li_dflag |= VFS_NBMAND;
    }

    /*
     * Propagate inheritable mount flags from the real vfs.
     */
    if ((li->li_realvfs->vfs_flag & VFS_RDONLY) &&
            !vfs_optionisset(vfsp, MNTOPT_RO, NULL))
        vfs_setmntopt(vfsp, MNTOPT_RO, NULL,
                      VFS_NODISPLAY);
    if ((li->li_realvfs->vfs_flag & VFS_NOSETUID) &&
            !vfs_optionisset(vfsp, MNTOPT_NOSETUID, NULL))
        vfs_setmntopt(vfsp, MNTOPT_NOSETUID, NULL,
                      VFS_NODISPLAY);
    if ((li->li_realvfs->vfs_flag & VFS_NODEVICES) &&
            !vfs_optionisset(vfsp, MNTOPT_NODEVICES, NULL))
        vfs_setmntopt(vfsp, MNTOPT_NODEVICES, NULL,
                      VFS_NODISPLAY);
    /*
     * Permissive flags such as VFS_XATTR, as opposed to restrictive flags
     * such as VFS_RDONLY, are handled differently.  An explicit
     * MNTOPT_NOXATTR should override the underlying filesystem's VFS_XATTR.
     */
    if ((li->li_realvfs->vfs_flag & VFS_XATTR) &&
            !vfs_optionisset(vfsp, MNTOPT_NOXATTR, NULL) &&
            !vfs_optionisset(vfsp, MNTOPT_XATTR, NULL))
        vfs_setmntopt(vfsp, MNTOPT_XATTR, NULL,
                      VFS_NODISPLAY);
    if ((li->li_realvfs->vfs_flag & VFS_NBMAND) &&
            !vfs_optionisset(vfsp, MNTOPT_NBMAND, NULL) &&
            !vfs_optionisset(vfsp, MNTOPT_NONBMAND, NULL))
        vfs_setmntopt(vfsp, MNTOPT_NBMAND, NULL,
                      VFS_NODISPLAY);

    li->li_refct = 0;
    vfsp->vfs_data = (caddr_t)li;
    vfsp->vfs_bcount = 0;
    vfsp->vfs_fstype = lofsfstype;
    vfsp->vfs_bsize = li->li_realvfs->vfs_bsize;

    vfsp->vfs_dev = li->li_realvfs->vfs_dev;
    vfsp->vfs_fsid.val[0] = li->li_realvfs->vfs_fsid.val[0];
    vfsp->vfs_fsid.val[1] = li->li_realvfs->vfs_fsid.val[1];

    if (vfs_optionisset(vfsp, MNTOPT_LOFS_NOSUB, NULL)) {
        li->li_flag |= LO_NOSUB;
    }

    /*
     * Propagate any VFS features
     */

    vfs_propagate_features(li->li_realvfs, vfsp);

    /*
     * Setup the hashtable. If the root of this mount isn't a directory,
     * there's no point in allocating a large hashtable. A table with one
     * bucket is sufficient.
     */
    if (realrootvp->v_type != VDIR)
        lsetup(li, 1);
    else
        lsetup(li, 0);

    /*
     * Make the root vnode
     */
    srootvp = makelonode(realrootvp, li, 0);
    srootvp->v_flag |= VROOT;
    li->li_rootvp = srootvp;

#ifdef LODEBUG
    lo_dprint(4, "lo_mount: vfs %p realvfs %p root %p realroot %p li %p\n",
              vfsp, li->li_realvfs, srootvp, realrootvp, li);
#endif
    return (0);
}
Example #5
/*ARGSUSED*/
static int
zfs_vfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);	
	objset_t *os = zfsvfs->z_os;
	znode_t	*zp, *nextzp;
	int ret, i;
	int flags;
	
	/*XXX NOEL: delegation admin stuffs, add back if we use delg. admin */
#if 0
	ret = 0; /* UNDEFINED: secpolicy_fs_unmount(cr, vfsp); */
	if (ret) {
		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
		    ZFS_DELEG_PERM_MOUNT, cr);
		if (ret)
			return (ret);
	}

	/*
	 * We purge the parent filesystem's vfsp as the parent filesystem
	 * and all of its snapshots have their vnode's v_vfsp set to the
	 * parent's filesystem's vfsp.  Note, 'z_parent' is self
	 * referential for non-snapshots.
	 */
	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
#endif

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL &&
	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0) {
		return (ret);
#endif
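	/*
	 * Flush vnodes on this mount; system vnodes are skipped, and on a
	 * forced unmount any remaining active vnodes are force-closed.
	 */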
	flags = SKIPSYSTEM;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	ret = vflush(mp, NULLVP, flags);

	/*
	 * Mac OS X needs a file system modify time
	 *
	 * We use the mtime of the "com.apple.system.mtime" 
	 * extended attribute, which is associated with the
	 * file system root directory.
	 *
	 * Here we need to release the ref we took on z_mtime_vp during mount.
	 */
	if ((ret == 0) || (mntflags & MNT_FORCE)) {
		if (zfsvfs->z_mtime_vp != NULL) {
			struct vnode *mvp;

			mvp = zfsvfs->z_mtime_vp;
			zfsvfs->z_mtime_vp = NULL;

			if (vnode_get(mvp) == 0) {
				vnode_rele(mvp);
				vnode_recycle(mvp);
				vnode_put(mvp);
			}
		}
	}

	if (!(mntflags & MNT_FORCE)) {
		/*
		 * Check the number of active vnodes in the file system.
		 * Our count is maintained in the vfs structure, but the
		 * number is off by 1 to indicate a hold on the vfs
		 * structure itself.
		 *
		 * The '.zfs' directory maintains a reference of its
		 * own, and any active references underneath are
		 * reflected in the vnode count.
		 */
		
		if (ret)
			return (EBUSY);
#if 0
		if (zfsvfs->z_ctldir == NULL) {
			if (vfsp->vfs_count > 1)
				return (EBUSY);
		} else {
			if (vfsp->vfs_count > 2 ||
			    zfsvfs->z_ctldir->v_count > 1) {
				return (EBUSY);
			}
		}
#endif
	}

	rw_enter(&zfsvfs->z_unmount_lock, RW_WRITER);
	rw_enter(&zfsvfs->z_unmount_inactive_lock, RW_WRITER);

	/*
	 * At this point there are no vops active, and any new vops will
	 * fail with EIO since we have z_unmount_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 * Note, the dmu can still callback via znode_pageout_func()
	 * which can zfs_znode_free() the znode.  So we lock
	 * z_all_znodes; search the list for a held dbuf; drop the lock
	 * (we know zp can't disappear if we hold a dbuf lock) then
	 * regrab the lock and restart.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
		nextzp = list_next(&zfsvfs->z_all_znodes, zp);
		if (zp->z_dbuf_held) {
			/* dbufs should only be held when force unmounting */
			zp->z_dbuf_held = 0;
			mutex_exit(&zfsvfs->z_znodes_lock);
			dmu_buf_rele(zp->z_dbuf, NULL);
			/* Start again */
			mutex_enter(&zfsvfs->z_znodes_lock);
			nextzp = list_head(&zfsvfs->z_all_znodes);
		}
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	/*
	 * Set the unmounted flag and let new vops unblock.
	 * zfs_inactive will have the unmounted behavior, and all other
	 * vops will fail with EIO.
	 */
	zfsvfs->z_unmounted = B_TRUE;
	rw_exit(&zfsvfs->z_unmount_lock);
	rw_exit(&zfsvfs->z_unmount_inactive_lock);

	/*
	 * Unregister properties.
	 */
#ifndef __APPLE__
	if (!dmu_objset_is_snapshot(os))
		zfs_unregister_callbacks(zfsvfs);
#endif
	/*
	 * Close the zil. NB: Can't close the zil while zfs_inactive
	 * threads are blocked as zil_close can call zfs_inactive.
	 */
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	}

	/*
	 * Evict all dbufs so that cached znodes will be freed
	 */
	if (dmu_objset_evict_dbufs(os, B_TRUE)) {
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
		(void) dmu_objset_evict_dbufs(os, B_FALSE);
	}

	/*
	 * Finally close the objset
	 */
	dmu_objset_close(os);

	/*
	 * We can now safely destroy the '.zfs' directory node.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL)
		zfsctl_destroy(zfsvfs);
#endif

	/*
	 * Note that this work is normally done in zfs_freevfs, but since
	 * there is no VOP_FREEVFS in OSX, we free VFS items here
	 */
	OSDecrementAtomic((SInt32 *)&zfs_active_fs_count);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);

	mutex_destroy(&zfsvfs->z_znodes_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rw_destroy(&zfsvfs->z_unmount_lock);
	rw_destroy(&zfsvfs->z_unmount_inactive_lock);

	return (0);
}


 
struct vnode* vnode_getparent(struct vnode *vp);  /* sys/vnode_internal.h */

static int
zfs_vget_internal(zfsvfs_t *zfsvfs, ino64_t ino, struct vnode **vpp)
{
	struct vnode	*vp;
	struct vnode	*dvp = NULL;
	znode_t		*zp;
	int		error;

	*vpp = NULL;
	
	/*
	 * On Mac OS X we always export the root directory id as 2
	 * and its parent as 1
	 */
	if (ino == 2 || ino == 1)
		ino = zfsvfs->z_root;
	
	if ((error = zfs_zget(zfsvfs, ino, &zp)))
		goto out;

	/* Don't expose EA objects! */
	if (zp->z_phys->zp_flags & ZFS_XATTR) {
		vnode_put(ZTOV(zp));
		error = ENOENT;
		goto out;
	}

	*vpp = vp = ZTOV(zp);

	if (vnode_isvroot(vp))
		goto out;

	/*
	 * If this znode didn't just come from the cache then
	 * it won't have a valid identity (parent and name).
	 *
	 * Manually fix its identity here (normally done by namei lookup).
	 */
	if ((dvp = vnode_getparent(vp)) == NULL) {
		if (zp->z_phys->zp_parent != 0 &&
		    zfs_vget_internal(zfsvfs, zp->z_phys->zp_parent, &dvp)) {
			goto out;
		}
		if ( vnode_isdir(dvp) ) {
			char objname[ZAP_MAXNAMELEN];  /* 256 bytes */
			int flags = VNODE_UPDATE_PARENT;

			/* Look for znode's name in its parent's zap */
			if ( zap_value_search(zfsvfs->z_os,
			                      zp->z_phys->zp_parent, 
			                      zp->z_id,
			                      ZFS_DIRENT_OBJ(-1ULL),
			                      objname) == 0 ) {
				flags |= VNODE_UPDATE_NAME;
			}

			/* Update the znode's parent and name */
			vnode_update_identity(vp, dvp, objname, 0, 0, flags);
		}
	}
	/* All done with znode's parent */
	vnode_put(dvp);
out:
	return (error);
}

/*
 * Get a vnode from a file id (ignoring the generation)
 *
 * Use by NFS Server (readdirplus) and VFS (build_path)
 */
static int
zfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp, __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	int error;

	ZFS_ENTER(zfsvfs);

	/*
	 * On Mac OS X we always export the root directory id as 2.
	 * So we don't expect to see the real root directory id
	 * from zfs_vfs_vget KPI (unless of course the real id was
	 * already 2).
	 */
	if ((ino == zfsvfs->z_root) && (zfsvfs->z_root != 2)) {
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}
	error = zfs_vget_internal(zfsvfs, ino, vpp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
/* ARGSUSED */
static int
zfsctl_snapdir_lookup(vnode_t *dvp, char *nm, vnode_t **vpp, pathname_t *pnp,
    int flags, vnode_t *rdir, cred_t *cr, caller_context_t *ct,
    int *direntflags, pathname_t *realpnp)
{
	zfsctl_snapdir_t *sdp = dvp->v_data;
	objset_t *snap;
	char snapname[MAXNAMELEN];
	char real[MAXNAMELEN];
	char *mountpoint;
	zfs_snapentry_t *sep, search;
	struct mounta margs;
	vfs_t *vfsp;
	size_t mountpoint_len;
	avl_index_t where;
	zfsvfs_t *zfsvfs = dvp->v_vfsp->vfs_data;
	int err;

	/*
	 * No extended attributes allowed under .zfs
	 */
	if (flags & LOOKUP_XATTR)
		return (EINVAL);

	ASSERT(dvp->v_type == VDIR);

	/*
	 * If we get a recursive call, that means we got called
	 * from the domount() code while it was trying to look up the
	 * spec (which looks like a local path for zfs).  We need to
	 * add some flag to domount() to tell it not to do this lookup.
	 */
	if (MUTEX_HELD(&sdp->sd_lock))
		return (ENOENT);

	ZFS_ENTER(zfsvfs);

	if (gfs_lookup_dot(vpp, dvp, zfsvfs->z_ctldir, nm) == 0) {
		ZFS_EXIT(zfsvfs);
		return (0);
	}

	if (flags & FIGNORECASE) {
		boolean_t conflict = B_FALSE;

		err = dmu_snapshot_realname(zfsvfs->z_os, nm, real,
		    MAXNAMELEN, &conflict);
		if (err == 0) {
			nm = real;
		} else if (err != ENOTSUP) {
			ZFS_EXIT(zfsvfs);
			return (err);
		}
		if (realpnp)
			(void) strlcpy(realpnp->pn_buf, nm,
			    realpnp->pn_bufsize);
		if (conflict && direntflags)
			*direntflags = ED_CASE_CONFLICT;
	}

	mutex_enter(&sdp->sd_lock);
	search.se_name = (char *)nm;
	if ((sep = avl_find(&sdp->sd_snaps, &search, &where)) != NULL) {
		*vpp = sep->se_root;
		VN_HOLD(*vpp);
		err = traverse(vpp);
		if (err) {
			VN_RELE(*vpp);
			*vpp = NULL;
		} else if (*vpp == sep->se_root) {
			/*
			 * The snapshot was unmounted behind our backs,
			 * try to remount it.
			 */
			goto domount;
		} else {
			/*
			 * VROOT was set during the traverse call.  We need
			 * to clear it since we're pretending to be part
			 * of our parent's vfs.
			 */
			(*vpp)->v_flag &= ~VROOT;
		}
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (err);
	}

	/*
	 * The requested snapshot is not currently mounted, look it up.
	 */
	err = zfsctl_snapshot_zname(dvp, nm, MAXNAMELEN, snapname);
	if (err) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		/*
		 * Handle "ls *" or "?" gracefully by mapping EILSEQ to
		 * ENOENT, since the shell ultimately passes "*" or "?"
		 * as the name to look up.
		 */
		return (err == EILSEQ ? ENOENT : err);
	}
	if (dmu_objset_hold(snapname, FTAG, &snap) != 0) {
		mutex_exit(&sdp->sd_lock);
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}

	sep = kmem_alloc(sizeof (zfs_snapentry_t), KM_SLEEP);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	*vpp = sep->se_root = zfsctl_snapshot_mknode(dvp, dmu_objset_id(snap));
	avl_insert(&sdp->sd_snaps, sep, where);

	dmu_objset_rele(snap, FTAG);
domount:
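	/*
	 * Build the mountpoint path <parent mntpt>/.zfs/snapshot/<name>
	 * and mount the snapshot dataset there, on top of the GFS vnode.
	 */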
	mountpoint_len = strlen(refstr_value(dvp->v_vfsp->vfs_mntpt)) +
	    strlen("/.zfs/snapshot/") + strlen(nm) + 1;
	mountpoint = kmem_alloc(mountpoint_len, KM_SLEEP);
	(void) snprintf(mountpoint, mountpoint_len, "%s/.zfs/snapshot/%s",
	    refstr_value(dvp->v_vfsp->vfs_mntpt), nm);

	margs.spec = snapname;
	margs.dir = mountpoint;
	margs.flags = MS_SYSSPACE | MS_NOMNTTAB;
	margs.fstype = "zfs";
	margs.dataptr = NULL;
	margs.datalen = 0;
	margs.optptr = NULL;
	margs.optlen = 0;

	err = domount("zfs", &margs, *vpp, kcred, &vfsp);
	kmem_free(mountpoint, mountpoint_len);

	if (err == 0) {
		/*
		 * Return the mounted root rather than the covered mount point.
		 * Takes the GFS vnode at .zfs/snapshot/<snapname> and returns
		 * the ZFS vnode mounted on top of the GFS node.  This ZFS
		 * vnode is the root of the newly created vfsp.
		 */
		VFS_RELE(vfsp);
		err = traverse(vpp);
	}

	if (err == 0) {
		/*
		 * Fix up the root vnode mounted on .zfs/snapshot/<snapname>.
		 *
		 * This is where we lie about our v_vfsp in order to
		 * make .zfs/snapshot/<snapname> accessible over NFS
		 * without requiring manual mounts of <snapname>.
		 */
		ASSERT(VTOZ(*vpp)->z_zfsvfs != zfsvfs);
		VTOZ(*vpp)->z_zfsvfs->z_parent = zfsvfs;
		(*vpp)->v_vfsp = zfsvfs->z_vfs;
		(*vpp)->v_flag &= ~VROOT;
	}
	mutex_exit(&sdp->sd_lock);
	ZFS_EXIT(zfsvfs);

	/*
	 * If we had an error, drop our hold on the vnode and
	 * zfsctl_snapshot_inactive() will clean up.
	 */
	if (err) {
		VN_RELE(*vpp);
		*vpp = NULL;
	}
	return (err);
}
Example #7
/* ARGSUSED */
int
ufs_fioffs(
	struct vnode	*vp,
	char 		*vap,		/* must be NULL - reserved */
	struct cred	*cr)		/* credentials from ufs_ioctl */
{
	int error;
	struct ufsvfs	*ufsvfsp;
	struct ulockfs	*ulp;

	/* file system has been forcibly unmounted */
	ufsvfsp = VTOI(vp)->i_ufsvfs;
	if (ufsvfsp == NULL)
		return (EIO);

	ulp = &ufsvfsp->vfs_ulockfs;

	/*
	 * suspend the delete thread
	 *	this must be done outside the lockfs locking protocol
	 */
	vfs_lock_wait(vp->v_vfsp);
	ufs_thread_suspend(&ufsvfsp->vfs_delete);

	/* hold the mutex to prevent race with a lockfs request */
	mutex_enter(&ulp->ul_lock);
	atomic_inc_ulong(&ufs_quiesce_pend);

	if (ULOCKFS_IS_HLOCK(ulp)) {
		error = EIO;
		goto out;
	}
	if (ULOCKFS_IS_ELOCK(ulp)) {
		error = EBUSY;
		goto out;
	}
	/* wait for outstanding accesses to finish */
	if (error = ufs_quiesce(ulp))
		goto out;

	/*
	 * If logging, and the logmap was marked as not rollable,
	 * make it rollable now, and start the trans_roll thread and
	 * the reclaim thread.  The log at this point is safe to write to.
	 */
	if (ufsvfsp->vfs_log) {
		ml_unit_t	*ul = ufsvfsp->vfs_log;
		struct fs	*fsp = ufsvfsp->vfs_fs;
		int		err;

		if (ul->un_flags & LDL_NOROLL) {
			ul->un_flags &= ~LDL_NOROLL;
			logmap_start_roll(ul);
			if (!fsp->fs_ronly && (fsp->fs_reclaim &
			    (FS_RECLAIM|FS_RECLAIMING))) {
				fsp->fs_reclaim &= ~FS_RECLAIM;
				fsp->fs_reclaim |= FS_RECLAIMING;
				ufs_thread_start(&ufsvfsp->vfs_reclaim,
				    ufs_thread_reclaim, vp->v_vfsp);
				if (!fsp->fs_ronly) {
					TRANS_SBWRITE(ufsvfsp,
					    TOP_SBUPDATE_UPDATE);
					if (err =
					    geterror(ufsvfsp->vfs_bufp)) {
						refstr_t	*mntpt;
						mntpt = vfs_getmntpoint(
						    vp->v_vfsp);
						cmn_err(CE_NOTE,
						    "Filesystem Flush "
						    "Failed to update "
						    "Reclaim Status for "
						    " %s, Write failed to "
						    "update superblock, "
						    "error %d",
						    refstr_value(mntpt),
						    err);
						refstr_rele(mntpt);
					}
				}
			}
		}
	}

	/* synchronously flush dirty data and metadata */
	error = ufs_flush(vp->v_vfsp);

out:
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vp->v_vfsp);

	/*
	 * allow the delete thread to continue
	 */
	ufs_thread_continue(&ufsvfsp->vfs_delete);
	return (error);
}
Example #8
/*
 * Given a directory, return the full, resolved path.  This looks up "..",
 * searches for the given vnode in the parent, appends the component, etc.  It
 * is used to implement vnodetopath() and getcwd() when the cached path fails.
 */
static int
dirtopath(vnode_t *vrootp, vnode_t *vp, char *buf, size_t buflen, int flags,
    cred_t *cr)
{
	pathname_t pn, rpn, emptypn;
	vnode_t *cmpvp, *pvp = NULL;
	vnode_t *startvp = vp;
	int err = 0, vprivs;
	size_t complen;
	char *dbuf;
	dirent64_t *dp;
	char		*bufloc;
	size_t		dlen = DIRENT64_RECLEN(MAXPATHLEN);
	refstr_t	*mntpt;

	/* Operation only allowed on directories */
	ASSERT(vp->v_type == VDIR);

	/* We must have at least enough space for "/" */
	if (buflen < 2)
		return (ENAMETOOLONG);

	/* Start at end of string with terminating null */
	bufloc = &buf[buflen - 1];
	*bufloc = '\0';

	pn_alloc(&pn);
	pn_alloc(&rpn);
	dbuf = kmem_alloc(dlen, KM_SLEEP);
	bzero(&emptypn, sizeof (emptypn));

	/*
	 * Begin with an additional reference on vp.  This will be decremented
	 * during the loop.
	 */
	VN_HOLD(vp);

	for (;;) {
		/*
		 * Return if we've reached the root.  If the buffer is empty,
		 * return '/'.  We explicitly don't use vn_compare(), since it
		 * compares the real vnodes.  A lofs mount of '/' would produce
		 * incorrect results otherwise.
		 */
		if (VN_CMP(vrootp, vp)) {
			if (*bufloc == '\0')
				*--bufloc = '/';
			break;
		}

		/*
		 * If we've reached the VFS root, something has gone wrong.  We
		 * should have reached the root in the above check.  The only
		 * explanation is that 'vp' is not contained within the given
		 * root, in which case we return EPERM.
		 */
		if (VN_CMP(rootdir, vp)) {
			err = EPERM;
			goto out;
		}

		/*
		 * Shortcut: see if this vnode is a mountpoint.  If so,
		 * grab the path information from the vfs_t.
		 */
		if (vp->v_flag & VROOT) {

			mntpt = vfs_getmntpoint(vp->v_vfsp);
			if ((err = pn_set(&pn, (char *)refstr_value(mntpt)))
			    == 0) {
				refstr_rele(mntpt);
				rpn.pn_path = rpn.pn_buf;

				/*
				 * Ensure the mountpoint still exists.
				 */
				VN_HOLD(vrootp);
				if (vrootp != rootdir)
					VN_HOLD(vrootp);
				if (lookuppnvp(&pn, &rpn, flags, NULL,
				    &cmpvp, vrootp, vrootp, cr) == 0) {

					if (VN_CMP(vp, cmpvp)) {
						VN_RELE(cmpvp);

						complen = strlen(rpn.pn_path);
						bufloc -= complen;
						if (bufloc < buf) {
							err = ERANGE;
							goto out;
						}
						bcopy(rpn.pn_path, bufloc,
						    complen);
						break;
					} else {
						VN_RELE(cmpvp);
					}
				}
			} else {
				refstr_rele(mntpt);
			}
		}

		/*
		 * Shortcut: see if this vnode has a correct v_path.  If so,
		 * the work is already done.
		 */
		mutex_enter(&vp->v_lock);
		if (vp->v_path != NULL) {

			if ((err = pn_set(&pn, vp->v_path)) == 0) {
				mutex_exit(&vp->v_lock);
				rpn.pn_path = rpn.pn_buf;

				/*
				 * Ensure the v_path points to the correct vnode
				 */
				VN_HOLD(vrootp);
				if (vrootp != rootdir)
					VN_HOLD(vrootp);
				if (lookuppnvp(&pn, &rpn, flags, NULL,
				    &cmpvp, vrootp, vrootp, cr) == 0) {

					if (VN_CMP(vp, cmpvp)) {
						VN_RELE(cmpvp);

						complen = strlen(rpn.pn_path);
						bufloc -= complen;
						if (bufloc < buf) {
							err = ERANGE;
							goto out;
						}
						bcopy(rpn.pn_path, bufloc,
						    complen);
						break;
					} else {
						VN_RELE(cmpvp);
					}
				}
			} else {
				mutex_exit(&vp->v_lock);
			}
		} else {
			mutex_exit(&vp->v_lock);
		}

		/*
		 * Shortcuts failed, search for this vnode in its parent.  If
		 * this is a mountpoint, then get the vnode underneath.
		 */
		if (vp->v_flag & VROOT)
			vp = vn_under(vp);
		if ((err = VOP_LOOKUP(vp, "..", &pvp, &emptypn, 0, vrootp, cr,
		    NULL, NULL, NULL)) != 0)
			goto out;

		/*
		 * With extended attributes, it's possible for a directory to
		 * have a parent that is a regular file.  Check for that here.
		 */
		if (pvp->v_type != VDIR) {
			err = ENOTDIR;
			goto out;
		}

		/*
		 * If this is true, something strange has happened.  This is
		 * only true if we are the root of a filesystem, which should
		 * have been caught by the check above.
		 */
		if (VN_CMP(pvp, vp)) {
			err = ENOENT;
			goto out;
		}

		/*
		 * Check if we have read and search privilege so, that
		 * we can lookup the path in the directory
		 */
		vprivs = (flags & LOOKUP_CHECKREAD) ? VREAD | VEXEC : VEXEC;
		if ((err = VOP_ACCESS(pvp, vprivs, 0, cr, NULL)) != 0) {
			goto out;
		}

		/*
		 * Try to obtain the path component from dnlc cache
		 * before searching through the directory.
		 */
		if ((cmpvp = dnlc_reverse_lookup(vp, dbuf, dlen)) != NULL) {
			/*
			 * If we got parent vnode as a result,
			 * then the answered path is correct.
			 */
			if (VN_CMP(cmpvp, pvp)) {
				VN_RELE(cmpvp);
				complen = strlen(dbuf);
				bufloc -= complen;
				if (bufloc <= buf) {
					err = ENAMETOOLONG;
					goto out;
				}
				bcopy(dbuf, bufloc, complen);

				/* Prepend a slash to the current path */
				*--bufloc = '/';

				/* And continue with the next component */
				VN_RELE(vp);
				vp = pvp;
				pvp = NULL;
				continue;
			} else {
				VN_RELE(cmpvp);
			}
		}

		/*
		 * Search the parent directory for the entry corresponding to
		 * this vnode.
		 */
		if ((err = dirfindvp(vrootp, pvp, vp, cr, dbuf, dlen, &dp))
		    != 0)
			goto out;
		complen = strlen(dp->d_name);
		bufloc -= complen;
		if (bufloc <= buf) {
			err = ENAMETOOLONG;
			goto out;
		}
		bcopy(dp->d_name, bufloc, complen);

		/* Prepend a slash to the current path.  */
		*--bufloc = '/';

		/* And continue with the next component */
		VN_RELE(vp);
		vp = pvp;
		pvp = NULL;
	}

	/*
	 * Place the path at the beginning of the buffer.
	 */
	if (bufloc != buf)
		ovbcopy(bufloc, buf, buflen - (bufloc - buf));

out:
	/*
	 * If the error was ESTALE and the current directory to look in
	 * was the root for this lookup, the root for a mounted file
	 * system, or the starting directory for lookups, then
	 * return ENOENT instead of ESTALE.  In this case, no recovery
	 * is possible by the higher level.  If ESTALE was returned for
	 * some intermediate directory along the path, then recovery
	 * is potentially possible and retrying from the higher level
	 * will either correct the situation by purging stale cache
	 * entries or eventually get back to the point where no recovery
	 * is possible.
	 */
	if (err == ESTALE &&
	    (VN_CMP(vp, vrootp) || (vp->v_flag & VROOT) || vp == startvp))
		err = ENOENT;

	kmem_free(dbuf, dlen);
	VN_RELE(vp);
	if (pvp)
		VN_RELE(pvp);
	pn_free(&pn);
	pn_free(&rpn);

	return (err);
}
Example #9
int
dogetcwd(char *buf, size_t buflen)
{
	int ret;
	vnode_t *vp;
	vnode_t *compvp;
	refstr_t *cwd, *oldcwd;
	const char *value;
	pathname_t rpnp, pnp;
	proc_t *p = curproc;

	/*
	 * Check to see if there is a cached version of the cwd.  If so, lookup
	 * the cached value and make sure it is the same vnode.
	 */
	mutex_enter(&p->p_lock);
	if ((cwd = PTOU(p)->u_cwd) != NULL)
		refstr_hold(cwd);
	vp = PTOU(p)->u_cdir;
	VN_HOLD(vp);
	mutex_exit(&p->p_lock);

	/*
	 * Make sure we have permission to access the current directory.
	 */
	if ((ret = VOP_ACCESS(vp, VEXEC, 0, CRED(), NULL)) != 0) {
		if (cwd != NULL)
			refstr_rele(cwd);
		VN_RELE(vp);
		return (ret);
	}

	if (cwd) {
		value = refstr_value(cwd);
		if ((ret = pn_get((char *)value, UIO_SYSSPACE, &pnp)) != 0) {
			refstr_rele(cwd);
			VN_RELE(vp);
			return (ret);
		}

		pn_alloc(&rpnp);

		if (lookuppn(&pnp, &rpnp, NO_FOLLOW, NULL, &compvp) == 0) {

			if (VN_CMP(vp, compvp) &&
			    strcmp(value, rpnp.pn_path) == 0) {
				VN_RELE(compvp);
				VN_RELE(vp);
				pn_free(&pnp);
				pn_free(&rpnp);
				if (strlen(value) + 1 > buflen) {
					refstr_rele(cwd);
					return (ENAMETOOLONG);
				}
				bcopy(value, buf, strlen(value) + 1);
				refstr_rele(cwd);
				return (0);
			}

			VN_RELE(compvp);
		}

		pn_free(&rpnp);
		pn_free(&pnp);

		refstr_rele(cwd);
	}

	ret = vnodetopath_common(NULL, vp, buf, buflen, CRED(),
	    LOOKUP_CHECKREAD);

	VN_RELE(vp);

	/*
	 * Store the new cwd and replace the existing cached copy.
	 */
	if (ret == 0)
		cwd = refstr_alloc(buf);
	else
		cwd = NULL;

	mutex_enter(&p->p_lock);
	oldcwd = PTOU(p)->u_cwd;
	PTOU(p)->u_cwd = cwd;
	mutex_exit(&p->p_lock);

	if (oldcwd)
		refstr_rele(oldcwd);

	return (ret);
}
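
A minimal, hypothetical in-kernel caller of dogetcwd() (sketch only, not part
of the code above): allocate a MAXPATHLEN buffer, resolve the current working
directory, and log it.

static void
log_cwd(void)
{
	char *buf = kmem_alloc(MAXPATHLEN, KM_SLEEP);

	if (dogetcwd(buf, MAXPATHLEN) == 0)
		cmn_err(CE_NOTE, "cwd: %s", buf);
	kmem_free(buf, MAXPATHLEN);
}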
Example #10
/*
 * getflabel -
 *
 * Return pointer to the ts_label associated with the specified file,
 * or returns NULL if error occurs.  Caller is responsible for doing
 * a label_rele of the ts_label.
 */
ts_label_t *
getflabel(vnode_t *vp)
{
	vfs_t		*vfsp, *rvfsp;
	vnode_t		*rvp, *rvp2;
	zone_t		*zone;
	ts_label_t	*zl;
	int		err;
	boolean_t	vfs_is_held = B_FALSE;
	char		vpath[MAXPATHLEN];

	ASSERT(vp);
	vfsp = vp->v_vfsp;
	if (vfsp == NULL)
		return (NULL);

	rvp = vp;

	/*
	 * Traverse lofs mounts and fattach'es to get the real vnode
	 */
	if (VOP_REALVP(rvp, &rvp2, NULL) == 0)
		rvp = rvp2;

	rvfsp = rvp->v_vfsp;

	/* rvp/rvfsp now represent the real vnode/vfs we will be using */

	/* Go elsewhere to handle all nfs files. */
	if (strncmp(vfssw[rvfsp->vfs_fstype].vsw_name, "nfs", 3) == 0)
		return (getflabel_nfs(rvfsp));

	/*
	 * Fast path, for objects in a labeled zone: everything except
	 * for lofs/nfs will be just the label of that zone.
	 */
	if ((rvfsp->vfs_zone != NULL) && (rvfsp->vfs_zone != global_zone)) {
		if ((strcmp(vfssw[rvfsp->vfs_fstype].vsw_name,
		    "lofs") != 0)) {
			zone = rvfsp->vfs_zone;
			zone_hold(zone);
			goto zone_out;		/* return this label */
		}
	}

	/*
	 * Get the vnode path -- it may be missing or weird for some
	 * cases, like devices.  In those cases use the label of the
	 * current zone.
	 */
	err = vnodetopath(rootdir, rvp, vpath, sizeof (vpath), kcred);
	if ((err != 0) || (*vpath != '/')) {
		zone = curproc->p_zone;
		zone_hold(zone);
		goto zone_out;
	}

	/*
	 * For zfs filesystem, return the explicit label property if a
	 * meaningful one exists.
	 */
	if (strncmp(vfssw[rvfsp->vfs_fstype].vsw_name, "zfs", 3) == 0) {
		ts_label_t *tsl;

		tsl = getflabel_zfs(rvfsp);

		/* if label found, return it, otherwise continue... */
		if (tsl != NULL)
			return (tsl);
	}

	/*
	 * If a mountpoint exists, hold the vfs while we reference it.
	 * Otherwise if mountpoint is NULL it should not be held (e.g.,
	 * a hold/release on spec_vfs would result in an attempted free
	 * and panic.)
	 */
	if (vfsp->vfs_mntpt != NULL) {
		VFS_HOLD(vfsp);
		vfs_is_held = B_TRUE;
	}

	zone = zone_find_by_any_path(vpath, B_FALSE);

	/*
	 * If the vnode source zone is properly set to a non-global zone, or
	 * any zone if the mount is R/W, then use the label of that zone.
	 */
	if ((zone != global_zone) || ((vfsp->vfs_flag & VFS_RDONLY) != 0))
		goto zone_out;		/* return this label */

	/*
	 * Otherwise, if we're not in the global zone, use the label of
	 * our zone.
	 */
	if ((zone = curproc->p_zone) != global_zone) {
		zone_hold(zone);
		goto zone_out;		/* return this label */
	}

	/*
	 * We're in the global zone and the mount is R/W ... so the file
	 * may actually be in the global zone -- or in the root of any zone.
	 * Always build our own path for the file, to be sure it's simplified
	 * (i.e., no ".", "..", "//", and so on).
	 */

	zone_rele(zone);
	zone = zone_find_by_any_path(vpath, B_FALSE);

zone_out:
	if ((curproc->p_zone == global_zone) && (zone == global_zone)) {
		vfs_t		*nvfs;
		boolean_t	exported = B_FALSE;
		refstr_t	*mntpt_ref;
		char		*mntpt;

		/*
		 * File is in the global zone - check whether it's admin_high.
		 * If it's in a filesys that was exported from the global zone,
		 * it's admin_low by definition.  Otherwise, if it's in a
		 * filesys that's NOT exported to any zone, it's admin_high.
		 *
		 * And for these files if there wasn't a valid mount resource,
		 * the file must be admin_high (not exported, probably a global
		 * zone device).
		 */
		if (!vfs_is_held)
			goto out_high;

		mntpt_ref = vfs_getmntpoint(vfsp);
		mntpt = (char *)refstr_value(mntpt_ref);

		if ((mntpt != NULL) && (*mntpt == '/')) {
			zone_t	*to_zone;

			to_zone = zone_find_by_any_path(mntpt, B_FALSE);
			zone_rele(to_zone);
			if (to_zone != global_zone) {
				/* force admin_low */
				exported = B_TRUE;
			}
		}
		if (mntpt_ref)
			refstr_rele(mntpt_ref);

		if (!exported) {
			size_t	plen = strlen(vpath);

			vfs_list_read_lock();
			nvfs = vfsp->vfs_next;
			while (nvfs != vfsp) {
				const char	*rstr;
				size_t		rlen = 0;

				/*
				 * Skip checking this vfs if it's not lofs
				 * (the only way to export from the global
				 * zone to a zone).
				 */
				if (strncmp(vfssw[nvfs->vfs_fstype].vsw_name,
				    "lofs", 4) != 0) {
					nvfs = nvfs->vfs_next;
					continue;
				}

				rstr = refstr_value(nvfs->vfs_resource);
				if (rstr != NULL)
					rlen = strlen(rstr);

				/*
				 * Check for a match: does this vfs correspond
				 * to our global zone file path?  I.e., check
				 * if the resource string of this vfs is a
				 * prefix of our path.
				 */
				if ((rlen > 0) && (rlen <= plen) &&
				    (strncmp(rstr, vpath, rlen) == 0) &&
				    (vpath[rlen] == '/' ||
				    vpath[rlen] == '\0')) {
					/* force admin_low */
					exported = B_TRUE;
					break;
				}
				nvfs = nvfs->vfs_next;
			}
			vfs_list_unlock();
		}

		if (!exported)
			goto out_high;
	}

	if (vfs_is_held)
		VFS_RELE(vfsp);

	/*
	 * Now that we have the "home" zone for the file, return the slabel
	 * of that zone.
	 */
	zl = zone->zone_slabel;
	label_hold(zl);
	zone_rele(zone);
	return (zl);

out_high:
	if (vfs_is_held)
		VFS_RELE(vfsp);

	label_hold(l_admin_high);
	zone_rele(zone);
	return (l_admin_high);
}
/*ARGSUSED*/
static int
zfs_umount(vfs_t *vfsp, int fflag)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	objset_t *os;
	cred_t *cr = curthread->td_ucred;
	int ret;

	ret = secpolicy_fs_unmount(cr, vfsp);
	if (ret) {
		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
		    ZFS_DELEG_PERM_MOUNT, cr);
		if (ret)
			return (ret);
	}
	/*
	 * We purge the parent filesystem's vfsp as the parent filesystem
	 * and all of its snapshots have their vnode's v_vfsp set to the
	 * parent's filesystem's vfsp.  Note, 'z_parent' is self
	 * referential for non-snapshots.
	 */
	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
	if (zfsvfs->z_ctldir != NULL) {
		if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
			return (ret);
		ret = vflush(vfsp, 0, 0, curthread);
		ASSERT(ret == EBUSY);
		if (!(fflag & MS_FORCE)) {
			if (zfsvfs->z_ctldir->v_count > 1)
				return (EBUSY);
			ASSERT(zfsvfs->z_ctldir->v_count == 1);
		}
		zfsctl_destroy(zfsvfs);
		ASSERT(zfsvfs->z_ctldir == NULL);
	}

	if (fflag & MS_FORCE) {
		/*
		 * Mark file system as unmounted before calling
		 * vflush(FORCECLOSE). This way we ensure no future vnops
		 * will be called and risk operating on DOOMED vnodes.
		 */
		rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
		zfsvfs->z_unmounted = B_TRUE;
		rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
	}

	/*
	 * Flush all the files.
	 */
	ret = vflush(vfsp, 1, (fflag & MS_FORCE) ? FORCECLOSE : 0, curthread);
	if (ret != 0) {
		if (!zfsvfs->z_issnap) {
			zfsctl_create(zfsvfs);
			ASSERT(zfsvfs->z_ctldir != NULL);
		}
		return (ret);
	}

	if (!(fflag & MS_FORCE)) {
		/*
		 * Check the number of active vnodes in the file system.
		 * Our count is maintained in the vfs structure, but the
		 * number is off by 1 to indicate a hold on the vfs
		 * structure itself.
		 *
		 * The '.zfs' directory maintains a reference of its
		 * own, and any active references underneath are
		 * reflected in the vnode count.
		 */
		if (zfsvfs->z_ctldir == NULL) {
			if (vfsp->vfs_count > 1)
				return (EBUSY);
		} else {
			if (vfsp->vfs_count > 2 ||
			    zfsvfs->z_ctldir->v_count > 1)
				return (EBUSY);
		}
	} else {
		MNT_ILOCK(vfsp);
		vfsp->mnt_kern_flag |= MNTK_UNMOUNTF;
		MNT_IUNLOCK(vfsp);
	}

	VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
	os = zfsvfs->z_os;

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zfsvfs.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_close(os);
	}

	/*
	 * We can now safely destroy the '.zfs' directory node.
	 */
	if (zfsvfs->z_ctldir != NULL)
		zfsctl_destroy(zfsvfs);
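	/*
	 * For a snapshot mount, drop the hold on the covered vnode
	 * (.zfs/snapshot/<name>) that was taken when the snapshot was
	 * mounted.
	 */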
	if (zfsvfs->z_issnap) {
		vnode_t *svp = vfsp->mnt_vnodecovered;

		if (svp->v_count >= 2)
			VN_RELE(svp);
	}
	zfs_freevfs(vfsp);

	return (0);
}
Example #12
int
corectl(int subcode, uintptr_t arg1, uintptr_t arg2, uintptr_t arg3)
{
	int error = 0;
	proc_t *p;
	refstr_t *rp;
	size_t size;
	char *path;
	core_content_t content = CC_CONTENT_INVALID;
	struct core_globals *cg;
	zone_t *zone = curproc->p_zone;

	cg = zone_getspecific(core_zone_key, zone);
	ASSERT(cg != NULL);

	switch (subcode) {
	case CC_SET_OPTIONS:
		if ((error = secpolicy_coreadm(CRED())) == 0) {
			if (arg1 & ~CC_OPTIONS)
				error = EINVAL;
			else
				cg->core_options = (uint32_t)arg1;
		}
		break;

	case CC_GET_OPTIONS:
		return (cg->core_options);

	case CC_GET_GLOBAL_PATH:
	case CC_GET_DEFAULT_PATH:
	case CC_GET_PROCESS_PATH:
		if (subcode == CC_GET_GLOBAL_PATH) {
			mutex_enter(&cg->core_lock);
			if ((rp = cg->core_file) != NULL)
				refstr_hold(rp);
			mutex_exit(&cg->core_lock);
		} else if (subcode == CC_GET_DEFAULT_PATH) {
			rp = corectl_path_value(cg->core_default_path);
		} else {
			rp = NULL;
			mutex_enter(&pidlock);
			if ((p = prfind((pid_t)arg3)) == NULL ||
			    p->p_stat == SIDL) {
				mutex_exit(&pidlock);
				error = ESRCH;
			} else {
				mutex_enter(&p->p_lock);
				mutex_exit(&pidlock);
				mutex_enter(&p->p_crlock);
				if (!hasprocperm(p->p_cred, CRED()))
					error = EPERM;
				else if (p->p_corefile != NULL)
					rp = corectl_path_value(p->p_corefile);
				mutex_exit(&p->p_crlock);
				mutex_exit(&p->p_lock);
			}
		}
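		/*
		 * If no core path is configured, return an empty string;
		 * otherwise copy the path out and drop the hold taken on
		 * the refstr above.
		 */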
		if (rp == NULL) {
			if (error == 0 && suword8((void *)arg1, 0))
				error = EFAULT;
		} else {
			error = copyoutstr(refstr_value(rp), (char *)arg1,
			    (size_t)arg2, NULL);
			refstr_rele(rp);
		}
		break;

	case CC_SET_GLOBAL_PATH:
	case CC_SET_DEFAULT_PATH:
		if ((error = secpolicy_coreadm(CRED())) != 0)
			break;

		/* FALLTHROUGH */
	case CC_SET_PROCESS_PATH:
		if ((size = MIN((size_t)arg2, MAXPATHLEN)) == 0) {
			error = EINVAL;
			break;
		}
		path = kmem_alloc(size, KM_SLEEP);
		error = copyinstr((char *)arg1, path, size, NULL);
		if (error == 0) {
			if (subcode == CC_SET_PROCESS_PATH) {
				error = set_proc_info((pid_t)arg3, path, 0);
			} else if (subcode == CC_SET_DEFAULT_PATH) {
				corectl_path_set(cg->core_default_path, path);
			} else if (*path != '\0' && *path != '/') {
				error = EINVAL;
			} else {
				refstr_t *nrp = refstr_alloc(path);

				mutex_enter(&cg->core_lock);
				rp = cg->core_file;
				if (*path == '\0')
					cg->core_file = NULL;
				else
					refstr_hold(cg->core_file = nrp);
				mutex_exit(&cg->core_lock);

				if (rp != NULL)
					refstr_rele(rp);

				refstr_rele(nrp);
			}
		}
		kmem_free(path, size);
		break;

	case CC_SET_GLOBAL_CONTENT:
	case CC_SET_DEFAULT_CONTENT:
		if ((error = secpolicy_coreadm(CRED())) != 0)
			break;

		/* FALLTHROUGH */
	case CC_SET_PROCESS_CONTENT:
		error = copyin((void *)arg1, &content, sizeof (content));
		if (error != 0)
			break;

		/*
		 * If any unknown bits are set, don't let this charade
		 * continue.
		 */
		if (content & ~CC_CONTENT_ALL) {
			error = EINVAL;
			break;
		}

		if (subcode == CC_SET_PROCESS_CONTENT) {
			error = set_proc_info((pid_t)arg2, NULL, content);
		} else if (subcode == CC_SET_DEFAULT_CONTENT) {
			corectl_content_set(cg->core_default_content, content);
		} else {
			mutex_enter(&cg->core_lock);
			cg->core_content = content;
			mutex_exit(&cg->core_lock);
		}

		break;

	case CC_GET_GLOBAL_CONTENT:
		content = cg->core_content;
		error = copyout(&content, (void *)arg1, sizeof (content));
		break;

	case CC_GET_DEFAULT_CONTENT:
		content = corectl_content_value(cg->core_default_content);
		error = copyout(&content, (void *)arg1, sizeof (content));
		break;

	case CC_GET_PROCESS_CONTENT:
		mutex_enter(&pidlock);
		if ((p = prfind((pid_t)arg2)) == NULL || p->p_stat == SIDL) {
			mutex_exit(&pidlock);
			error = ESRCH;
			break;
		}

		mutex_enter(&p->p_lock);
		mutex_exit(&pidlock);
		mutex_enter(&p->p_crlock);
		if (!hasprocperm(p->p_cred, CRED()))
			error = EPERM;
		else if (p->p_content == NULL)
			content = CC_CONTENT_NONE;
		else
			content = corectl_content_value(p->p_content);
		mutex_exit(&p->p_crlock);
		mutex_exit(&p->p_lock);

		if (error == 0)
			error = copyout(&content, (void *)arg1,
			    sizeof (content));
		break;

	default:
		error = EINVAL;
		break;
	}

	if (error)
		return (set_errno(error));
	return (0);
}
Example #13
/*
 * smbfs_mount_label_policy:
 *	Determine whether the mount is allowed according to MAC check,
 *	by comparing (where appropriate) label of the remote server
 *	against the label of the zone being mounted into.
 *
 *	Returns:
 *		 0 :	access allowed
 *		-1 :	read-only access allowed (i.e., read-down)
 *		>0 :	error code, such as EACCES
 *
 * NB:
 * NFS supports Cipso labels by parsing the vfs_resource
 * to see what the Solaris server global zone has shared.
 * We can't support that for CIFS since resource names
 * contain share names, not paths.
 */
static int
smbfs_mount_label_policy(vfs_t *vfsp, void *ipaddr, int addr_type, cred_t *cr)
{
	bslabel_t	*server_sl, *mntlabel;
	zone_t		*mntzone = NULL;
	ts_label_t	*zlabel;
	tsol_tpc_t	*tp;
	ts_label_t	*tsl = NULL;
	int		retv;

	/*
	 * Get the zone's label.  Each zone on a labeled system has a label.
	 */
	mntzone = zone_find_by_any_path(refstr_value(vfsp->vfs_mntpt), B_FALSE);
	zlabel = mntzone->zone_slabel;
	ASSERT(zlabel != NULL);
	label_hold(zlabel);

	retv = EACCES;				/* assume the worst */

	/*
	 * Next, get the assigned label of the remote server.
	 */
	tp = find_tpc(ipaddr, addr_type, B_FALSE);
	if (tp == NULL)
		goto out;			/* error getting host entry */

	if (tp->tpc_tp.tp_doi != zlabel->tsl_doi)
		goto rel_tpc;			/* invalid domain */
	if ((tp->tpc_tp.host_type != UNLABELED))
		goto rel_tpc;			/* invalid hosttype */

	server_sl = &tp->tpc_tp.tp_def_label;
	mntlabel = label2bslabel(zlabel);

	/*
	 * Now compare labels to complete the MAC check.  If the labels
	 * are equal or if the requestor is in the global zone and has
	 * NET_MAC_AWARE, then allow read-write access.   (Except for
	 * mounts into the global zone itself; restrict these to
	 * read-only.)
	 *
	 * If the requestor is in some other zone, but his label
	 * dominates the server, then allow read-down.
	 *
	 * Otherwise, access is denied.
	 */
	if (blequal(mntlabel, server_sl) ||
	    (crgetzoneid(cr) == GLOBAL_ZONEID &&
	    getpflags(NET_MAC_AWARE, cr) != 0)) {
		if ((mntzone == global_zone) ||
		    !blequal(mntlabel, server_sl))
			retv = -1;		/* read-only */
		else
			retv = 0;		/* access OK */
	} else if (bldominates(mntlabel, server_sl)) {
		retv = -1;			/* read-only */
	} else {
		retv = EACCES;
	}

	if (tsl != NULL)
		label_rele(tsl);

rel_tpc:
	/*LINTED*/
	TPC_RELE(tp);
out:
	if (mntzone)
		zone_rele(mntzone);
	label_rele(zlabel);
	return (retv);
}
Example #14
/*
 * smbfs mount vfsop
 * Set up mount info record and attach it to vfs struct.
 */
static int
smbfs_mount(vfs_t *vfsp, vnode_t *mvp, struct mounta *uap, cred_t *cr)
{
	char		*data = uap->dataptr;
	int		error;
	smbnode_t 	*rtnp = NULL;	/* root of this fs */
	smbmntinfo_t 	*smi = NULL;
	dev_t 		smbfs_dev;
	int 		version;
	int 		devfd;
	zone_t		*zone = curproc->p_zone;
	zone_t		*mntzone = NULL;
	smb_share_t 	*ssp = NULL;
	smb_cred_t 	scred;
	int		flags, sec;

	STRUCT_DECL(smbfs_args, args);		/* smbfs mount arguments */

	if ((error = secpolicy_fs_mount(cr, mvp, vfsp)) != 0)
		return (error);

	if (mvp->v_type != VDIR)
		return (ENOTDIR);

	/*
	 * get arguments
	 *
	 * uap->datalen might be different from sizeof (args)
	 * in a compatible situation.
	 */
	STRUCT_INIT(args, get_udatamodel());
	bzero(STRUCT_BUF(args), SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE));
	if (copyin(data, STRUCT_BUF(args), MIN(uap->datalen,
	    SIZEOF_STRUCT(smbfs_args, DATAMODEL_NATIVE))))
		return (EFAULT);

	/*
	 * Check mount program version
	 */
	version = STRUCT_FGET(args, version);
	if (version != SMBFS_VERSION) {
		cmn_err(CE_WARN, "mount version mismatch:"
		    " kernel=%d, mount=%d\n",
		    SMBFS_VERSION, version);
		return (EINVAL);
	}

	/*
	 * Deal with re-mount requests.
	 */
	if (uap->flags & MS_REMOUNT) {
		cmn_err(CE_WARN, "MS_REMOUNT not implemented");
		return (ENOTSUP);
	}

	/*
	 * Check for busy
	 */
	mutex_enter(&mvp->v_lock);
	if (!(uap->flags & MS_OVERLAY) &&
	    (mvp->v_count != 1 || (mvp->v_flag & VROOT))) {
		mutex_exit(&mvp->v_lock);
		return (EBUSY);
	}
	mutex_exit(&mvp->v_lock);

	/*
	 * Get the "share" from the netsmb driver (ssp).
	 * It is returned with a "ref" (hold) for us.
	 * Release this hold: at errout below, or in
	 * smbfs_freevfs().
	 */
	devfd = STRUCT_FGET(args, devfd);
	error = smb_dev2share(devfd, &ssp);
	if (error) {
		cmn_err(CE_WARN, "invalid device handle %d (%d)\n",
		    devfd, error);
		return (error);
	}

	/*
	 * Use "goto errout" from here on.
	 * See: ssp, smi, rtnp, mntzone
	 */

	/*
	 * Determine the zone we're being mounted into.
	 */
	zone_hold(mntzone = zone);		/* start with this assumption */
	if (getzoneid() == GLOBAL_ZONEID) {
		zone_rele(mntzone);
		mntzone = zone_find_by_path(refstr_value(vfsp->vfs_mntpt));
		ASSERT(mntzone != NULL);
		if (mntzone != zone) {
			error = EBUSY;
			goto errout;
		}
	}

	/*
	 * Stop the mount from going any further if the zone is going away.
	 */
	if (zone_status_get(mntzone) >= ZONE_IS_SHUTTING_DOWN) {
		error = EBUSY;
		goto errout;
	}

	/*
	 * On a Trusted Extensions client, we may have to force read-only
	 * for read-down mounts.
	 */
	if (is_system_labeled()) {
		void *addr;
		int ipvers = 0;
		struct smb_vc *vcp;

		vcp = SSTOVC(ssp);
		addr = smb_vc_getipaddr(vcp, &ipvers);
		error = smbfs_mount_label_policy(vfsp, addr, ipvers, cr);

		if (error > 0)
			goto errout;

		if (error == -1) {
			/* change mount to read-only to prevent write-down */
			vfs_setmntopt(vfsp, MNTOPT_RO, NULL, 0);
		}
	}

	/* Prevent unload. */
	atomic_inc_32(&smbfs_mountcount);

	/*
	 * Create a mount record and link it to the vfs struct.
	 * No more possibilities for errors from here on.
	 * Tear-down of this stuff is in smbfs_free_smi()
	 *
	 * Compare with NFS: nfsrootvp()
	 */
	smi = kmem_zalloc(sizeof (*smi), KM_SLEEP);

	mutex_init(&smi->smi_lock, NULL, MUTEX_DEFAULT, NULL);
	cv_init(&smi->smi_statvfs_cv, NULL, CV_DEFAULT, NULL);

	rw_init(&smi->smi_hash_lk, NULL, RW_DEFAULT, NULL);
	smbfs_init_hash_avl(&smi->smi_hash_avl);

	smi->smi_share = ssp;
	ssp = NULL;

	/*
	 * Convert the anonymous zone hold acquired via zone_hold() above
	 * into a zone reference.
	 */
	zone_init_ref(&smi->smi_zone_ref);
	zone_hold_ref(mntzone, &smi->smi_zone_ref, ZONE_REF_SMBFS);
	zone_rele(mntzone);
	mntzone = NULL;

	/*
	 * Initialize option defaults
	 */
	smi->smi_flags	= SMI_LLOCK;
	smi->smi_acregmin = SEC2HR(SMBFS_ACREGMIN);
	smi->smi_acregmax = SEC2HR(SMBFS_ACREGMAX);
	smi->smi_acdirmin = SEC2HR(SMBFS_ACDIRMIN);
	smi->smi_acdirmax = SEC2HR(SMBFS_ACDIRMAX);

	/*
	 * All "generic" mount options have already been
	 * handled in vfs.c:domount() - see mntopts stuff.
	 * Query generic options using vfs_optionisset().
	 */
	if (vfs_optionisset(vfsp, MNTOPT_INTR, NULL))
		smi->smi_flags |= SMI_INT;
	if (vfs_optionisset(vfsp, MNTOPT_ACL, NULL))
		smi->smi_flags |= SMI_ACL;

	/*
	 * Get the mount options that come in as smbfs_args,
	 * starting with args.flags (SMBFS_MF_xxx)
	 */
	flags = STRUCT_FGET(args, flags);
	smi->smi_uid 	= STRUCT_FGET(args, uid);
	smi->smi_gid 	= STRUCT_FGET(args, gid);
	smi->smi_fmode	= STRUCT_FGET(args, file_mode) & 0777;
	smi->smi_dmode	= STRUCT_FGET(args, dir_mode) & 0777;

	/*
	 * Handle the SMBFS_MF_xxx flags.
	 */
	if (flags & SMBFS_MF_NOAC)
		smi->smi_flags |= SMI_NOAC;
	if (flags & SMBFS_MF_ACREGMIN) {
		sec = STRUCT_FGET(args, acregmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acregmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACREGMAX) {
		sec = STRUCT_FGET(args, acregmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acregmax = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMIN) {
		sec = STRUCT_FGET(args, acdirmin);
		if (sec < 0 || sec > SMBFS_ACMINMAX)
			sec = SMBFS_ACMINMAX;
		smi->smi_acdirmin = SEC2HR(sec);
	}
	if (flags & SMBFS_MF_ACDIRMAX) {
		sec = STRUCT_FGET(args, acdirmax);
		if (sec < 0 || sec > SMBFS_ACMAXMAX)
			sec = SMBFS_ACMAXMAX;
		smi->smi_acdirmax = SEC2HR(sec);
	}

	/*
	 * Get attributes of the remote file system,
	 * i.e. ACL support, named streams, etc.
	 */
	smb_credinit(&scred, cr);
	error = smbfs_smb_qfsattr(smi->smi_share, &smi->smi_fsa, &scred);
	smb_credrele(&scred);
	if (error) {
		SMBVDEBUG("smbfs_smb_qfsattr error %d\n", error);
	}

	/*
	 * We enable XATTR by default (via smbfs_mntopts)
	 * but if the share does not support named streams,
	 * force the NOXATTR option (also clears XATTR).
	 * Caller will set or clear VFS_XATTR after this.
	 */
	if ((smi->smi_fsattr & FILE_NAMED_STREAMS) == 0)
		vfs_setmntopt(vfsp, MNTOPT_NOXATTR, NULL, 0);

	/*
	 * Ditto ACLs (disable if not supported on this share)
	 */
	if ((smi->smi_fsattr & FILE_PERSISTENT_ACLS) == 0) {
		vfs_setmntopt(vfsp, MNTOPT_NOACL, NULL, 0);
		smi->smi_flags &= ~SMI_ACL;
	}

	/*
	 * Assign a unique device id to the mount
	 */
	mutex_enter(&smbfs_minor_lock);
	do {
		smbfs_minor = (smbfs_minor + 1) & MAXMIN32;
		smbfs_dev = makedevice(smbfs_major, smbfs_minor);
	} while (vfs_devismounted(smbfs_dev));
	mutex_exit(&smbfs_minor_lock);

	vfsp->vfs_dev	= smbfs_dev;
	vfs_make_fsid(&vfsp->vfs_fsid, smbfs_dev, smbfsfstyp);
	vfsp->vfs_data	= (caddr_t)smi;
	vfsp->vfs_fstype = smbfsfstyp;
	vfsp->vfs_bsize = MAXBSIZE;
	vfsp->vfs_bcount = 0;

	smi->smi_vfsp	= vfsp;
	smbfs_zonelist_add(smi);	/* undo in smbfs_freevfs */

	/*
	 * Create the root vnode, which we need in unmount
	 * for the call to smbfs_check_table(), etc.
	 * Release this hold in smbfs_unmount.
	 */
	rtnp = smbfs_node_findcreate(smi, "\\", 1, NULL, 0, 0,
	    &smbfs_fattr0);
	ASSERT(rtnp != NULL);
	rtnp->r_vnode->v_type = VDIR;
	rtnp->r_vnode->v_flag |= VROOT;
	smi->smi_root = rtnp;

	/*
	 * NFS does other stuff here too:
	 *   async worker threads
	 *   init kstats
	 *
	 * End of code from NFS nfsrootvp()
	 */
	return (0);

errout:
	vfsp->vfs_data = NULL;
	if (smi != NULL)
		smbfs_free_smi(smi);

	if (mntzone != NULL)
		zone_rele(mntzone);

	if (ssp != NULL)
		smb_share_rele(ssp);

	return (error);
}