Example #1
0
/*
 * Tear down a loopback mount: refuse if anything is still busy,
 * then drop our hold on the root vnode.
 */
static int
lo_unmount(struct vfs *vfsp, int flag, struct cred *cr)
{
    struct loinfo *lip;

    /* Only privileged callers may unmount. */
    if (secpolicy_fs_unmount(cr, vfsp) != 0)
        return (EPERM);

    /*
     * Forced unmount is not implemented by this file system,
     * so reject MS_FORCE with ENOTSUP.
     */
    if (flag & MS_FORCE)
        return (ENOTSUP);

    lip = vtoli(vfsp);
#ifdef LODEBUG
    lo_dprint(4, "lo_unmount(%p) li %p\n", vfsp, lip);
#endif
    /*
     * Busy unless the only remaining references are our own:
     * one hold on the loinfo and one on the root vnode.
     */
    if (lip->li_refct != 1 || lip->li_rootvp->v_count != 1) {
#ifdef LODEBUG
        lo_dprint(4, "refct %d v_ct %d\n", lip->li_refct,
                  lip->li_rootvp->v_count);
#endif
        return (EBUSY);
    }

    /* Drop the final hold on the root vnode. */
    VN_RELE(lip->li_rootvp);
    return (0);
}
Example #2
0
/*
 * ctfs_unmount - the VFS_UNMOUNT entry point
 *
 * Rejects forced unmounts, fails with EBUSY while any vnode other
 * than the root is active, and otherwise releases the root vnode
 * hold and frees the per-mount state.
 */
static int
ctfs_unmount(vfs_t *vfsp, int flag, struct cred *cr)
{
	ctfs_vfs_t *vfsdata;

	/* Only privileged callers may unmount. */
	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);

	/*
	 * Supporting forced unmounts would be nice to do at some
	 * point; for now they are rejected.
	 */
	if (flag & MS_FORCE)
		return (ENOTSUP);

	/*
	 * Two holds minimum: one for the caller, one for the root
	 * vnode.
	 */
	ASSERT(vfsp->vfs_count >= 2);

	vfsdata = vfsp->vfs_data;

	/*
	 * Active vnodes (transitively) hold the root vnode, so any
	 * root v_count beyond our own hold means we are busy.
	 */
	if (vfsdata->ctvfs_root->v_count > 1)
		return (EBUSY);

	/*
	 * Drop the last hold on the root vnode; it will, in turn,
	 * release its hold on us.
	 */
	VN_RELE(vfsdata->ctvfs_root);

	/* Free the per-mount data and disappear. */
	kmem_free(vfsdata, sizeof (ctfs_vfs_t));

	return (0);
}
Example #3
0
/*
 * VMBlockUnmount: unmount this file system.  Fails with EBUSY while
 * anything beyond our own hold references the root vnode; otherwise
 * releases the covered vnode and the root, then frees the mount info.
 */
static int
VMBlockUnmount(struct vfs *vfsp,   // IN: This file system
               int flag,           // IN: Unmount flags
               struct cred *credp) // IN: Credentials of caller
{
   VMBlockMountInfo *mip;
   int err;
   int busy;

   Debug(VMBLOCK_ENTRY_LOGLEVEL, "VMBlockUnmount: entry\n");

   /* Only privileged callers may unmount. */
   err = secpolicy_fs_unmount(credp, vfsp);
   if (err) {
      return err;
   }

   mip = (VMBlockMountInfo *)vfsp->vfs_data;

   /* Sample the root vnode's reference count under its lock. */
   mutex_enter(&mip->root->v_lock);
   busy = mip->root->v_count > 1;
   mutex_exit(&mip->root->v_lock);
   if (busy) {
      return EBUSY;
   }

   VN_RELE(vfsp->vfs_vnodecovered);
   /*
    * We don't need to VN_RELE() mip->redirectVnode since it's the realVnode
    * for mip->root.  That means when we VN_RELE() mip->root and
    * VMBlockInactive() is called, VMBlockVnodePut() will VN_RELE()
    * mip->redirectVnode for us.  It's like magic, but better.
    */
   VN_RELE(mip->root);

   /* Free the pathname storage and the mount info itself. */
   pn_free(&mip->redirectPath);
   kmem_free(mip, sizeof *mip);

   vfsp->vfs_flag |= VFS_UNMOUNTED;

   return 0;
}
Example #4
0
/* ARGSUSED */
/*
 * fdunmount - unmount the fd file system.  Busy unless the root
 * vnode holds the only remaining reference.
 */
static int
fdunmount(vfs_t *vfsp, int flag, cred_t *cr)
{
	vnode_t *rootvp;

	/* Only privileged callers may unmount. */
	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);

	/*
	 * Forced unmount is not supported by this file system,
	 * so reject MS_FORCE with ENOTSUP.
	 */
	if (flag & MS_FORCE)
		return (ENOTSUP);

	/*
	 * The root vnode is stashed in vfs_data; any count beyond
	 * our single hold means active references remain.
	 */
	rootvp = (vnode_t *)vfsp->vfs_data;
	if (rootvp->v_count > 1)
		return (EBUSY);

	/* Drop the last hold on the root vnode. */
	VN_RELE(rootvp);
	return (0);
}
/*
 * objfs_unmount - the VFS_UNMOUNT entry point.  Rejects forced
 * unmounts, fails with EBUSY while active vnodes remain, then
 * releases the root vnode and frees the per-mount state.
 */
static int
objfs_unmount(vfs_t *vfsp, int flag, struct cred *cr)
{
	objfs_vfs_t *vdata;

	/* Only privileged callers may unmount. */
	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);

	/* Forced unmounts are not currently supported. */
	if (flag & MS_FORCE)
		return (ENOTSUP);

	/*
	 * There must be at least two holds on the vfs: one for the
	 * caller and one for the root vnode.
	 */
	ASSERT(vfsp->vfs_count >= 2);

	vdata = vfsp->vfs_data;

	/*
	 * Any active vnode results in a hold on the root vnode, so
	 * a count beyond our own single hold means we are busy.
	 */
	if (vdata->objfs_vfs_root->v_count > 1)
		return (EBUSY);

	/* Drop the last hold on the root vnode. */
	VN_RELE(vdata->objfs_vfs_root);

	/* Free the per-mount state. */
	kmem_free(vdata, sizeof (objfs_vfs_t));

	return (0);
}
Example #6
0
/*ARGSUSED*/
/*
 * VFS_UNMOUNT entry point.
 *
 * Forced unmounts (MS_FORCE) are honored: the vfs is marked
 * unmounted, in-flight file system threads are drained via the
 * z_op_cnt polling loop, and the objset is closed.  Non-forced
 * unmounts return EBUSY while references beyond our own holds
 * remain.
 */
static int
zfs_umount(vfs_t *vfsp, int fflag, cred_t *cr)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	int ret;

	/* Only privileged callers may unmount. */
	if ((ret = secpolicy_fs_unmount(cr, vfsp)) != 0)
		return (ret);


	/* Purge name-cache entries that reference this vfs. */
	(void) dnlc_purge_vfsp(vfsp, 0);

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
	if (zfsvfs->z_ctldir != NULL &&
	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
		return (ret);

	if (fflag & MS_FORCE) {
		/* Mark unmounted first so no new vops start. */
		vfsp->vfs_flag |= VFS_UNMOUNTED;
		zfsvfs->z_unmounted1 = B_TRUE;

		/*
		 * Wait for all zfs threads to leave zfs.
		 * Grabbing a rwlock as reader in all vops and
		 * as writer here doesn't work because it too easy to get
		 * multiple reader enters as zfs can re-enter itself.
		 * This can lead to deadlock if there is an intervening
		 * rw_enter as writer.
		 * So a file system threads ref count (z_op_cnt) is used.
		 * A polling loop on z_op_cnt may seem inefficient, but
		 * - this saves all threads on exit from having to grab a
		 *   mutex in order to cv_signal
		 * - only occurs on forced unmount in the rare case when
		 *   there are outstanding threads within the file system.
		 */
		while (zfsvfs->z_op_cnt) {
			delay(1);
		}

		zfs_objset_close(zfsvfs);

		return (0);
	}
	/*
	 * Stop all delete threads.
	 */
	(void) zfs_delete_thread_target(zfsvfs, 0);

	/*
	 * Check the number of active vnodes in the file system.
	 * Our count is maintained in the vfs structure, but the number
	 * is off by 1 to indicate a hold on the vfs structure itself.
	 *
	 * The '.zfs' directory maintains a reference of its own, and any active
	 * references underneath are reflected in the vnode count.
	 */
	if (zfsvfs->z_ctldir == NULL) {
		if (vfsp->vfs_count > 1) {
			/* Busy: restart the delete thread before failing. */
			if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0)
				(void) zfs_delete_thread_target(zfsvfs, 1);
			return (EBUSY);
		}
	} else {
		if (vfsp->vfs_count > 2 ||
		    (zfsvfs->z_ctldir->v_count > 1 && !(fflag & MS_FORCE))) {
			/* Busy: restart the delete thread before failing. */
			if ((zfsvfs->z_vfs->vfs_flag & VFS_RDONLY) == 0)
				(void) zfs_delete_thread_target(zfsvfs, 1);
			return (EBUSY);
		}
	}

	vfsp->vfs_flag |= VFS_UNMOUNTED;
	zfs_objset_close(zfsvfs);

	return (0);
}
/*ARGSUSED*/
/*
 * VFS_UNMOUNT entry point (FreeBSD-style: credentials come from
 * curthread rather than an explicit argument).
 *
 * Callers lacking the unmount privilege may still proceed when
 * delegated ZFS_DELEG_PERM_MOUNT on the dataset.  Handles both
 * forced and graceful unmounts: tears down the .zfs control
 * directory, flushes vnodes, and releases the objset.
 */
static int
zfs_umount(vfs_t *vfsp, int fflag)
{
	zfsvfs_t *zfsvfs = vfsp->vfs_data;
	objset_t *os;
	cred_t *cr = curthread->td_ucred;
	int ret;

	ret = secpolicy_fs_unmount(cr, vfsp);
	if (ret) {
		/*
		 * Privilege check failed; fall back to ZFS delegation:
		 * a user granted the "mount" permission on this dataset
		 * may still unmount it.
		 */
		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
		    ZFS_DELEG_PERM_MOUNT, cr);
		if (ret)
			return (ret);
	}
	/*
	 * We purge the parent filesystem's vfsp as the parent filesystem
	 * and all of its snapshots have their vnode's v_vfsp set to the
	 * parent's filesystem's vfsp.  Note, 'z_parent' is self
	 * referential for non-snapshots.
	 */
	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
	if (zfsvfs->z_ctldir != NULL) {
		if ((ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
			return (ret);
		/*
		 * A non-forced vflush is expected to fail with EBUSY
		 * here because the .zfs control vnodes are still held.
		 */
		ret = vflush(vfsp, 0, 0, curthread);
		ASSERT(ret == EBUSY);
		if (!(fflag & MS_FORCE)) {
			if (zfsvfs->z_ctldir->v_count > 1)
				return (EBUSY);
			ASSERT(zfsvfs->z_ctldir->v_count == 1);
		}
		zfsctl_destroy(zfsvfs);
		ASSERT(zfsvfs->z_ctldir == NULL);
	}

	if (fflag & MS_FORCE) {
		/*
		 * Mark file system as unmounted before calling
		 * vflush(FORCECLOSE). This way we ensure no future vnops
		 * will be called and risk operating on DOOMED vnodes.
		 */
		rrw_enter(&zfsvfs->z_teardown_lock, RW_WRITER, FTAG);
		zfsvfs->z_unmounted = B_TRUE;
		rrw_exit(&zfsvfs->z_teardown_lock, FTAG);
	}

	/*
	 * Flush all the files.
	 */
	ret = vflush(vfsp, 1, (fflag & MS_FORCE) ? FORCECLOSE : 0, curthread);
	if (ret != 0) {
		/*
		 * Flush failed; recreate the control directory destroyed
		 * above so the still-mounted file system stays usable.
		 */
		if (!zfsvfs->z_issnap) {
			zfsctl_create(zfsvfs);
			ASSERT(zfsvfs->z_ctldir != NULL);
		}
		return (ret);
	}

	if (!(fflag & MS_FORCE)) {
		/*
		 * Check the number of active vnodes in the file system.
		 * Our count is maintained in the vfs structure, but the
		 * number is off by 1 to indicate a hold on the vfs
		 * structure itself.
		 *
		 * The '.zfs' directory maintains a reference of its
		 * own, and any active references underneath are
		 * reflected in the vnode count.
		 */
		if (zfsvfs->z_ctldir == NULL) {
			if (vfsp->vfs_count > 1)
				return (EBUSY);
		} else {
			if (vfsp->vfs_count > 2 ||
			    zfsvfs->z_ctldir->v_count > 1)
				return (EBUSY);
		}
	} else {
		/* Forced: flag the mount as forcibly unmounting. */
		MNT_ILOCK(vfsp);
		vfsp->mnt_kern_flag |= MNTK_UNMOUNTF;
		MNT_IUNLOCK(vfsp);
	}

	VERIFY(zfsvfs_teardown(zfsvfs, B_TRUE) == 0);
	os = zfsvfs->z_os;

	/*
	 * z_os will be NULL if there was an error in
	 * attempting to reopen zfsvfs.
	 */
	if (os != NULL) {
		/*
		 * Unset the objset user_ptr.
		 */
		mutex_enter(&os->os->os_user_ptr_lock);
		dmu_objset_set_user(os, NULL);
		mutex_exit(&os->os->os_user_ptr_lock);

		/*
		 * Finally release the objset
		 */
		dmu_objset_close(os);
	}

	/*
	 * We can now safely destroy the '.zfs' directory node.
	 */
	if (zfsvfs->z_ctldir != NULL)
		zfsctl_destroy(zfsvfs);
	if (zfsvfs->z_issnap) {
		vnode_t *svp = vfsp->mnt_vnodecovered;

		/* Drop the snapshot's extra hold on the covered vnode. */
		if (svp->v_count >= 2)
			VN_RELE(svp);
	}
	zfs_freevfs(vfsp);

	return (0);
}
Example #8
0
/*
 * vfs operations
 */
/*
 * smbfs_unmount - the VFS_UNMOUNT entry point.
 *
 * Non-forced unmounts fail with EBUSY while any vnode other than the
 * root is active.  Both forced and non-forced paths then mark the vfs
 * unmounted, shut down the share, release the root vnode, tear down
 * the node table, and delete the kstats.
 */
static int
smbfs_unmount(vfs_t *vfsp, int flag, cred_t *cr)
{
	smbmntinfo_t	*smi;
	smbnode_t	*rtnp;

	smi = VFTOSMI(vfsp);

	/* Only privileged callers may unmount. */
	if (secpolicy_fs_unmount(cr, vfsp) != 0)
		return (EPERM);

	if ((flag & MS_FORCE) == 0) {
		/* Write back dirty pages before the busy check. */
		smbfs_rflush(vfsp, cr);

		/*
		 * If there are any active vnodes on this file system,
		 * (other than the root vnode) then the file system is
		 * busy and can't be umounted.
		 */
		if (smbfs_check_table(vfsp, smi->smi_root))
			return (EBUSY);

		/*
		 * We normally hold a ref to the root vnode, so
		 * check for references beyond the one we expect:
		 *   smbmntinfo_t -> smi_root
		 * Note that NFS does not hold the root vnode.
		 */
		if (smi->smi_root &&
		    smi->smi_root->r_vnode->v_count > 1)
			return (EBUSY);
	}

	/*
	 * common code for both forced and non-forced
	 *
	 * Setting VFS_UNMOUNTED prevents new operations.
	 * Operations already underway may continue,
	 * but not for long.
	 */
	vfsp->vfs_flag |= VFS_UNMOUNTED;

	/*
	 * Shutdown any outstanding I/O requests on this share,
	 * and force a tree disconnect.  The share object will
	 * continue to hang around until smb_share_rele().
	 * This should also cause most active nodes to be
	 * released as their operations fail with EIO.
	 */
	smb_share_kill(smi->smi_share);

	/*
	 * If we hold the root VP (and we normally do)
	 * then it's safe to release it now.
	 */
	if (smi->smi_root) {
		rtnp = smi->smi_root;
		smi->smi_root = NULL;
		VN_RELE(rtnp->r_vnode);	/* release root vnode */
	}

	/*
	 * Remove all nodes from the node hash tables.
	 * This (indirectly) calls: smbfs_addfree, smbinactive,
	 * which will try to flush dirty pages, etc. so
	 * don't destroy the underlying share just yet.
	 *
	 * Also, with a forced unmount, some nodes may
	 * remain active, and those will get cleaned up
	 * after their last vn_rele.
	 */
	smbfs_destroy_table(vfsp);

	/*
	 * Delete our kstats...
	 *
	 * Doing it here, rather than waiting until
	 * smbfs_freevfs so these are not visible
	 * after the unmount.
	 */
	if (smi->smi_io_kstats) {
		kstat_delete(smi->smi_io_kstats);
		smi->smi_io_kstats = NULL;
	}
	if (smi->smi_ro_kstats) {
		kstat_delete(smi->smi_ro_kstats);
		smi->smi_ro_kstats = NULL;
	}

	/*
	 * The rest happens in smbfs_freevfs()
	 */
	return (0);
}