Example 1
/*
 * Traverse backward across mountpoint from the
 * root vnode of a filesystem to its mounted-on
 * vnode.
 */
vnode_t *
untraverse(vnode_t *vp)
{
	vnode_t *tvp, *nextvp;

	tvp = vp;
	for (;;) {
		if (!(tvp->v_flag & VROOT))
			break;

		/* lock vfs to prevent unmount of this vfs */
		vfs_lock_wait(tvp->v_vfsp);

		if ((nextvp = tvp->v_vfsp->vfs_vnodecovered) == NULL) {
			vfs_unlock(tvp->v_vfsp);
			break;
		}

		/*
		 * Hold nextvp to prevent unmount.  After unlock vfs and
		 * rele tvp, any number of overlays could be unmounted.
		 * Putting a hold on vfs_vnodecovered will only allow
		 * tvp's vfs to be unmounted. Of course, if the caller placed
		 * an extra hold on vp before calling untraverse, the
		 * following hold would not be needed.  Since the caller's
		 * previous actions are unknown, we need to hold here just
		 * to be safe.
		 */
		VN_HOLD(nextvp);
		vfs_unlock(tvp->v_vfsp);
		VN_RELE(tvp);
		tvp = nextvp;
	}

	return (tvp);
}
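
The comment above untraverse() notes that the hold on vp is consumed inside the loop and replaced by a hold on the covered vnode. The fragment below is a hypothetical caller sketch (not taken from the original source) illustrating that reference hand-off with the VN_HOLD()/VN_RELE() macros; the lookup_example() name is an assumption used only for illustration.

/*
 * Hypothetical caller of untraverse() (illustration only, not part of
 * the original source).  The hold taken here is consumed inside
 * untraverse() and handed back on the vnode it returns, so only the
 * returned vnode is released.
 */
static int
lookup_example(vnode_t *vp)
{
	vnode_t *covered;

	VN_HOLD(vp);			/* consumed by untraverse() */
	covered = untraverse(vp);	/* returned vnode carries a hold */

	/* ... operate on the mounted-on vnode ... */

	VN_RELE(covered);		/* drop the hold we received back */
	return (0);
}
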
Example 2
static void
zfsctl_rename_snap(zfsctl_snapdir_t *sdp, zfs_snapentry_t *sep, const char *nm)
{
	avl_index_t where;
	vfs_t *vfsp;
	refstr_t *pathref;
	char newpath[MAXNAMELEN];
	char *tail;

	ASSERT(MUTEX_HELD(&sdp->sd_lock));
	ASSERT(sep != NULL);

	vfsp = vn_mountedvfs(sep->se_root);
	ASSERT(vfsp != NULL);

	vfs_lock_wait(vfsp);

	/*
	 * Change the name in the AVL tree.
	 */
	avl_remove(&sdp->sd_snaps, sep);
	kmem_free(sep->se_name, strlen(sep->se_name) + 1);
	sep->se_name = kmem_alloc(strlen(nm) + 1, KM_SLEEP);
	(void) strcpy(sep->se_name, nm);
	VERIFY(avl_find(&sdp->sd_snaps, sep, &where) == NULL);
	avl_insert(&sdp->sd_snaps, sep, where);

	/*
	 * Change the current mountpoint info:
	 * 	- update the tail of the mntpoint path
	 *	- update the tail of the resource path
	 */
	pathref = vfs_getmntpoint(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '/')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setmntpoint(vfsp, newpath);

	pathref = vfs_getresource(vfsp);
	(void) strncpy(newpath, refstr_value(pathref), sizeof (newpath));
	VERIFY((tail = strrchr(newpath, '@')) != NULL);
	*(tail+1) = '\0';
	ASSERT3U(strlen(newpath) + strlen(nm), <, sizeof (newpath));
	(void) strcat(newpath, nm);
	refstr_rele(pathref);
	vfs_setresource(vfsp, newpath);

	vfs_unlock(vfsp);
}
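
zfsctl_rename_snap() rewrites only the tail of the mountpoint and resource strings: everything up to and including the last '/' (or '@') is kept, and the new snapshot name is appended. The small userland program below is a sketch of just that string manipulation with made-up paths; it is an illustration under assumed inputs, not code from the original module.

#include <assert.h>
#include <stdio.h>
#include <string.h>

#define	MAXNAMELEN	256

static void
replace_tail(char *path, size_t pathsz, int sep, const char *nm)
{
	char *tail = strrchr(path, sep);

	assert(tail != NULL);
	*(tail + 1) = '\0';			/* keep everything up to sep */
	assert(strlen(path) + strlen(nm) < pathsz);
	(void) strcat(path, nm);		/* append the new name */
}

int
main(void)
{
	char mntpt[MAXNAMELEN] = "/pool/fs/.zfs/snapshot/old";
	char resource[MAXNAMELEN] = "pool/fs@old";

	replace_tail(mntpt, sizeof (mntpt), '/', "new");
	replace_tail(resource, sizeof (resource), '@', "new");
	(void) printf("%s\n%s\n", mntpt, resource);
	return (0);
}
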
Example 3
/* ARGSUSED */
int
ufs_fioffs(
	struct vnode	*vp,
	char 		*vap,		/* must be NULL - reserved */
	struct cred	*cr)		/* credentials from ufs_ioctl */
{
	int error;
	struct ufsvfs	*ufsvfsp;
	struct ulockfs	*ulp;

	/* file system has been forcibly unmounted */
	ufsvfsp = VTOI(vp)->i_ufsvfs;
	if (ufsvfsp == NULL)
		return (EIO);

	ulp = &ufsvfsp->vfs_ulockfs;

	/*
	 * suspend the delete thread
	 *	this must be done outside the lockfs locking protocol
	 */
	vfs_lock_wait(vp->v_vfsp);
	ufs_thread_suspend(&ufsvfsp->vfs_delete);

	/* hold the mutex to prevent race with a lockfs request */
	mutex_enter(&ulp->ul_lock);
	atomic_inc_ulong(&ufs_quiesce_pend);

	if (ULOCKFS_IS_HLOCK(ulp)) {
		error = EIO;
		goto out;
	}
	if (ULOCKFS_IS_ELOCK(ulp)) {
		error = EBUSY;
		goto out;
	}
	/* wait for outstanding accesses to finish */
	if ((error = ufs_quiesce(ulp)) != 0)
		goto out;

	/*
	 * If logging, and the logmap was marked as not rollable,
	 * make it rollable now, and start the trans_roll thread and
	 * the reclaim thread.  The log at this point is safe to write to.
	 */
	if (ufsvfsp->vfs_log) {
		ml_unit_t	*ul = ufsvfsp->vfs_log;
		struct fs	*fsp = ufsvfsp->vfs_fs;
		int		err;

		if (ul->un_flags & LDL_NOROLL) {
			ul->un_flags &= ~LDL_NOROLL;
			logmap_start_roll(ul);
			if (!fsp->fs_ronly && (fsp->fs_reclaim &
			    (FS_RECLAIM|FS_RECLAIMING))) {
				fsp->fs_reclaim &= ~FS_RECLAIM;
				fsp->fs_reclaim |= FS_RECLAIMING;
				ufs_thread_start(&ufsvfsp->vfs_reclaim,
				    ufs_thread_reclaim, vp->v_vfsp);
				if (!fsp->fs_ronly) {
					TRANS_SBWRITE(ufsvfsp,
					    TOP_SBUPDATE_UPDATE);
					if ((err = geterror(
					    ufsvfsp->vfs_bufp)) != 0) {
						refstr_t	*mntpt;
						mntpt = vfs_getmntpoint(
						    vp->v_vfsp);
						cmn_err(CE_NOTE,
						    "Filesystem Flush "
						    "Failed to update "
						    "Reclaim Status for "
						    "%s, Write failed to "
						    "update superblock, "
						    "error %d",
						    refstr_value(mntpt),
						    err);
						refstr_rele(mntpt);
					}
				}
			}
		}
	}

	/* synchronously flush dirty data and metadata */
	error = ufs_flush(vp->v_vfsp);

out:
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vp->v_vfsp);

	/*
	 * allow the delete thread to continue
	 */
	ufs_thread_continue(&ufsvfsp->vfs_delete);
	return (error);
}
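
ufs_fioffs() is the kernel side of a file system flush request. As a usage sketch, and assuming the standard Solaris _FIOFFS ioctl from sys/filio.h is the entry point that reaches this routine, a caller might look like the following; the program structure and error handling are illustrative only.

#include <sys/types.h>
#include <sys/filio.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	int fd;

	if (argc != 2) {
		(void) fprintf(stderr, "usage: %s <file-on-ufs>\n", argv[0]);
		return (1);
	}
	if ((fd = open(argv[1], O_RDONLY)) == -1) {
		perror("open");
		return (1);
	}
	/* the argument must be NULL; it is reserved (see vap above) */
	if (ioctl(fd, _FIOFFS, NULL) == -1)
		perror("_FIOFFS");
	(void) close(fd);
	return (0);
}
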
Example 4
/*
 * ufs_fiosdio
 *	Set delayed-io state.  This ioctl is tailored
 *	to metamucil's needs and may change at any time.
 */
int
ufs_fiosdio(
	struct vnode	*vp,		/* file's vnode */
	uint_t		*diop,		/* dio flag */
	int		flag,		/* flag from ufs_ioctl */
	struct cred	*cr)		/* credentials from ufs_ioctl */
{
	uint_t		dio;		/* copy of user's dio */
	struct inode	*ip;		/* inode for vp */
	struct ufsvfs	*ufsvfsp;
	struct fs	*fs;
	struct ulockfs	*ulp;
	int		error = 0;

#ifdef lint
	flag = flag;
#endif

	/* check input conditions */
	if (secpolicy_fs_config(cr, vp->v_vfsp) != 0)
		return (EPERM);

	if (copyin(diop, &dio, sizeof (dio)))
		return (EFAULT);

	if (dio > 1)
		return (EINVAL);

	/* file system has been forcibly unmounted */
	if (VTOI(vp)->i_ufsvfs == NULL)
		return (EIO);

	ip = VTOI(vp);
	ufsvfsp = ip->i_ufsvfs;
	ulp = &ufsvfsp->vfs_ulockfs;

	/* logging file system; dio ignored */
	if (TRANS_ISTRANS(ufsvfsp))
		return (error);

	/* hold the mutex to prevent race with a lockfs request */
	vfs_lock_wait(vp->v_vfsp);
	mutex_enter(&ulp->ul_lock);
	atomic_inc_ulong(&ufs_quiesce_pend);

	if (ULOCKFS_IS_HLOCK(ulp)) {
		error = EIO;
		goto out;
	}

	if (ULOCKFS_IS_ELOCK(ulp)) {
		error = EBUSY;
		goto out;
	}
	/* wait for outstanding accesses to finish */
	if ((error = ufs_quiesce(ulp)) != 0)
		goto out;

	/* flush w/invalidate */
	if ((error = ufs_flush(vp->v_vfsp)) != 0)
		goto out;

	/*
	 * update dio
	 */
	mutex_enter(&ufsvfsp->vfs_lock);
	ufsvfsp->vfs_dio = dio;

	/*
	 * enable/disable clean flag processing
	 */
	fs = ip->i_fs;
	if (fs->fs_ronly == 0 &&
	    fs->fs_clean != FSBAD &&
	    fs->fs_clean != FSLOG) {
		if (dio)
			fs->fs_clean = FSSUSPEND;
		else
			fs->fs_clean = FSACTIVE;
		ufs_sbwrite(ufsvfsp);
		mutex_exit(&ufsvfsp->vfs_lock);
	} else
		mutex_exit(&ufsvfsp->vfs_lock);
out:
	/*
	 * we need this broadcast because of the ufs_quiesce call above
	 */
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vp->v_vfsp);
	return (error);
}
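
ufs_fiosdio() copies a single uint_t flag in from user space and accepts only 0 or 1. Assuming the _FIOSDIO command from sys/filio.h is the ioctl that reaches this handler (with _FIOGDIO as the corresponding read-back), a minimal helper might be sketched as follows; the set_delayed_io() name is hypothetical.

#include <sys/types.h>
#include <sys/filio.h>
#include <sys/ioctl.h>
#include <fcntl.h>
#include <unistd.h>

static int
set_delayed_io(const char *path, uint_t dio)
{
	int fd, rc;

	if ((fd = open(path, O_RDONLY)) == -1)
		return (-1);
	/* dio must be 0 or 1; the kernel copyin()s it and validates it */
	rc = ioctl(fd, _FIOSDIO, &dio);
	(void) close(fd);
	return (rc);
}
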
Example 5
/*
 * Enable logging
 */
int
lqfs_enable(struct vnode *vp, struct fiolog *flp, cred_t *cr)
{
	int		error;
	inode_t		*ip = VTOI(vp);
	qfsvfs_t	*qfsvfsp = ip->i_qfsvfs;
	fs_lqfs_common_t	*fs = VFS_FS_PTR(qfsvfsp);
	ml_unit_t	*ul;
#ifdef LQFS_TODO_LOCKFS
	int		reclaim = 0;
	struct lockfs	lf;
	struct ulockfs	*ulp;
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */
	vfs_t		*vfsp = qfsvfsp->vfs_vfs;
	uint64_t	tmp_nbytes_actual;
	char fsclean;
	sam_sblk_t	*sblk = qfsvfsp->mi.m_sbp;

	/*
	 * File system is not capable of logging.
	 */
	if (!LQFS_CAPABLE(qfsvfsp)) {
		flp->error = FIOLOG_ENOTSUP;
		error = 0;
		goto out;
	}
	if (!SAM_MAGIC_V2A_OR_HIGHER(&sblk->info.sb)) {
		cmn_err(CE_WARN, "SAM-QFS: %s: Not enabling logging, "
		    " file system is not version 2A.", qfsvfsp->mt.fi_name);
		cmn_err(CE_WARN, "\tUpgrade file system with samfsck -u "
		    "first.");
		flp->error = FIOLOG_ENOTSUP;
		error = 0;
		goto out;
	}

	if (LQFS_GET_LOGBNO(fs)) {
		error = lqfs_log_validate(qfsvfsp, flp, cr);
	}

	/*
	 * Check if logging is already enabled
	 */
	if (LQFS_GET_LOGP(qfsvfsp)) {
		flp->error = FIOLOG_ETRANS;
		/* for root ensure logging option is set */
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
		error = 0;
		goto out;
	}

	/*
	 * Come back here to recheck if we had to disable the log.
	 */
recheck:
	error = 0;
	flp->error = FIOLOG_ENONE;

	/*
	 * Adjust requested log size
	 */
	flp->nbytes_actual = flp->nbytes_requested;
	if (flp->nbytes_actual == 0) {
		tmp_nbytes_actual =
		    (((uint64_t)FS_SIZE(fs)) / ldl_divisor) << FS_FSHIFT(fs);
		flp->nbytes_actual = (uint_t)MIN(tmp_nbytes_actual, INT_MAX);
	}
	flp->nbytes_actual = MAX(flp->nbytes_actual, ldl_minlogsize);
	flp->nbytes_actual = MIN(flp->nbytes_actual, ldl_maxlogsize);
	flp->nbytes_actual = blkroundup(fs, flp->nbytes_actual);

	/*
	 * logging is enabled and the log is the right size; done
	 */
	ul = LQFS_GET_LOGP(qfsvfsp);

	if (ul && LQFS_GET_LOGBNO(fs) &&
	    (flp->nbytes_actual == ul->un_requestsize)) {
		vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);
		error = 0;
		goto out;
	}

	/*
	 * Readonly file system
	 */
	if (FS_RDONLY(fs)) {
		flp->error = FIOLOG_EROFS;
		error = 0;
		goto out;
	}

#ifdef LQFS_TODO_LOCKFS
	/*
	 * File system must be write locked to enable logging
	 */
	error = qfs_fiolfss(vp, &lf);
	if (error) {
		goto out;
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		error = 0;
		goto out;
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		error = 0;
		goto out;
	}
#else
	/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * Grab appropriate locks to synchronize with the rest
	 * of the system
	 */
	vfs_lock_wait(vfsp);
#ifdef LQFS_TODO_LOCKFS
	ulp = &ufsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
#else
	/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * File system must be fairly consistent to enable logging
	 */
	fsclean = LQFS_GET_FS_CLEAN(fs);
	if (fsclean != FSLOG &&
	    fsclean != FSACTIVE &&
	    fsclean != FSSTABLE &&
	    fsclean != FSCLEAN) {
		flp->error = FIOLOG_ECLEAN;
		goto unlockout;
	}

#ifdef LUFS
	/*
	 * A write-locked file system is only active if there are
	 * open deleted files; so remember to set FS_RECLAIM later.
	 */
	if (LQFS_GET_FS_CLEAN(fs) == FSACTIVE) {
		reclaim = FS_RECLAIM;
	}
#else
	/* QFS doesn't have a file reclaim thread. */
#endif /* LUFS */

	/*
	 * Logging is already enabled; must be changing the log's size
	 */
	if (LQFS_GET_LOGBNO(fs) && LQFS_GET_LOGP(qfsvfsp)) {
#ifdef LQFS_TODO_LOCKFS
		/*
		 * Before we can disable logging, we must give up our
		 * lock.  As a consequence of unlocking and disabling the
		 * log, the fs structure may change.  Because of this, when
		 * disabling is complete, we will go back to recheck to
		 * repeat all of the checks that we performed to get to
		 * this point.  Disabling sets fs->fs_logbno to 0, so this
		 * will not put us into an infinite loop.
		 */
		mutex_exit(&ulp->ul_lock);
#else
		/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */
		vfs_unlock(vfsp);

#ifdef LQFS_TODO_LOCKFS
		lf.lf_lock = LOCKFS_ULOCK;
		lf.lf_flags = 0;
		error = qfs_fiolfs(vp, &lf, 1);
		if (error) {
			flp->error = FIOLOG_ENOULOCK;
			error = 0;
			goto out;
		}
#else
		/* QFS doesn't really support lockfs. */
#endif /* LQFS_TODO_LOCKFS */
		error = lqfs_disable(vp, flp);
		if (error || (flp->error != FIOLOG_ENONE)) {
			error = 0;
			goto out;
		}
		goto recheck;
	}

	error = lqfs_alloc(qfsvfsp, flp, cr);
	if (error) {
		goto errout;
	}
#ifdef LUFS
#else
	if ((error = lqfs_log_validate(qfsvfsp, flp, cr)) != 0) {
		goto errout;
	}
#endif /* LUFS */

	/*
	 * Create all of the incore structs
	 */
	error = lqfs_snarf(qfsvfsp, fs, 0);
	if (error) {
		goto errout;
	}

	/*
	 * DON'T ``GOTO ERROUT'' PAST THIS POINT
	 */

	/*
	 * Pretend we were just mounted with logging enabled
	 *	freeze and drain the file system of readers
	 *		Get the ops vector
	 *		If debug, record metadata locations with log subsystem
	 *		Start the delete thread
	 *		Start the reclaim thread, if necessary
	 *	Thaw readers
	 */
	vfs_setmntopt(vfsp, MNTOPT_LOGGING, NULL, 0);

	TRANS_DOMATAMAP(qfsvfsp);
	TRANS_MATA_MOUNT(qfsvfsp);
	TRANS_MATA_SI(qfsvfsp, fs);

#ifdef LUFS
	qfs_thread_start(&qfsvfsp->vfs_delete, qfs_thread_delete, vfsp);
	if (fs->fs_reclaim & (FS_RECLAIM|FS_RECLAIMING)) {
		fs->fs_reclaim &= ~FS_RECLAIM;
		fs->fs_reclaim |=  FS_RECLAIMING;
		qfs_thread_start(&qfsvfsp->vfs_reclaim,
		    qfs_thread_reclaim, vfsp);
	} else {
		fs->fs_reclaim |= reclaim;
	}
#else
	/* QFS doesn't have file reclaim or i-node delete threads. */
#endif /* LUFS */

#ifdef LUFS
	mutex_exit(&ulp->ul_lock);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LUFS */

	vfs_unlock(vfsp);

#ifdef LQFS_TODO_LOCKFS
	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_ENOULOCK;
		error = 0;
		goto out;
	}
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	/*
	 * There's nothing in the log yet (we've just allocated it)
	 * so directly write out the super block.
	 * Note, we have to force this sb out to disk
	 * (not just to the log) so that if we crash we know we are logging
	 */
	VFS_LOCK_MUTEX_ENTER(qfsvfsp);
	LQFS_SET_FS_CLEAN(fs, FSLOG);
	LQFS_SET_FS_ROLLED(fs, FS_NEED_ROLL);	/* Mark the fs as unrolled */
#ifdef LUFS
	QFS_BWRITE2(NULL, qfsvfsp->vfs_bufp);
#else
	sam_update_sblk(qfsvfsp, 0, 0, TRUE);
#endif /* LUFS */
	VFS_LOCK_MUTEX_EXIT(qfsvfsp);

	error = 0;
	goto out;

errout:
	/*
	 * Acquire the qfs_scan_lock before de-linking the mtm data
	 * structure so that we keep qfs_sync() and qfs_update() away
	 * when they execute their ufs_scan_inodes() runs while we are
	 * in the process of enabling/disabling logging.
	 */
	mutex_enter(&qfs_scan_lock);
	(void) lqfs_unsnarf(qfsvfsp);
	mutex_exit(&qfs_scan_lock);

	(void) lqfs_free(qfsvfsp);
unlockout:
#ifdef LQFS_TODO_LOCKFS
	mutex_exit(&ulp->ul_lock);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	vfs_unlock(vfsp);

#ifdef LQFS_TODO_LOCKFS
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) qfs_fiolfs(vp, &lf, 1);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

out:
	mutex_enter(&ip->mp->ms.m_waitwr_mutex);
	ip->mp->mt.fi_status |= FS_LOGSTATE_KNOWN;
	mutex_exit(&ip->mp->ms.m_waitwr_mutex);
	return (error);
}
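
The "Adjust requested log size" block in lqfs_enable() derives a default from the file system size, clamps the result between ldl_minlogsize and ldl_maxlogsize, and rounds it up to a whole block. The standalone sketch below reproduces that arithmetic with assumed constants so the clamping can be run and inspected in isolation; the values chosen for the limits and block size are illustrative, not the kernel's.

#include <stdint.h>
#include <stdio.h>

#define	MINLOGSIZE	(256 * 1024)		/* stands in for ldl_minlogsize */
#define	MAXLOGSIZE	(512 * 1024 * 1024)	/* stands in for ldl_maxlogsize */
#define	DIVISOR		1024			/* stands in for ldl_divisor */
#define	BSHIFT		10			/* stands in for FS_FSHIFT(fs) */
#define	BSIZE		(1 << BSHIFT)		/* file system block size */

#define	MIN(a, b)	((a) < (b) ? (a) : (b))
#define	MAX(a, b)	((a) > (b) ? (a) : (b))

static uint32_t
adjust_logsize(uint32_t requested, uint64_t fs_frags)
{
	uint64_t nbytes = requested;

	if (nbytes == 0) {
		/* default: a fraction of the file system size */
		nbytes = (fs_frags / DIVISOR) << BSHIFT;
		nbytes = MIN(nbytes, INT32_MAX);
	}
	nbytes = MAX(nbytes, MINLOGSIZE);
	nbytes = MIN(nbytes, MAXLOGSIZE);
	/* round up to a whole block, as blkroundup() does */
	return ((uint32_t)((nbytes + BSIZE - 1) & ~(uint64_t)(BSIZE - 1)));
}

int
main(void)
{
	/* e.g. 8M one-kilobyte fragments, i.e. an 8 GB file system */
	(void) printf("%u\n", adjust_logsize(0, 8ULL * 1024 * 1024));
	return (0);
}
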
Example 6
/*
 * Disable logging
 */
int
lqfs_disable(vnode_t *vp, struct fiolog *flp)
{
	int		error = 0;
	inode_t		*ip = VTOI(vp);
	qfsvfs_t	*qfsvfsp = ip->i_qfsvfs;
	fs_lqfs_common_t	*fs = VFS_FS_PTR(qfsvfsp);
#ifdef LUFS
	struct lockfs	lf;
	struct ulockfs	*ulp;
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LUFS */

	flp->error = FIOLOG_ENONE;

	/*
	 * Logging is already disabled; done
	 */
	if (LQFS_GET_LOGBNO(fs) == 0 || LQFS_GET_LOGP(qfsvfsp) == NULL ||
	    !LQFS_CAPABLE(qfsvfsp)) {
		vfs_setmntopt(qfsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
		error = 0;
		goto out;
	}

#ifdef LUFS
	/*
	 * File system must be write locked to disable logging
	 */
	error = qfs_fiolfss(vp, &lf);
	if (error) {
		goto out;
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		error = 0;
		goto out;
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		error = 0;
		goto out;
	}
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LUFS */

	if (LQFS_GET_LOGP(qfsvfsp) == NULL || LQFS_GET_LOGBNO(fs) == 0) {
		goto errout;
	}

	/*
	 * WE ARE COMMITTED TO DISABLING LOGGING PAST THIS POINT
	 */

	/*
	 * Disable logging:
	 * Suspend the reclaim thread and force the delete thread to exit.
	 *	When a nologging mount has completed there may still be
	 *	work for reclaim to do so just suspend this thread until
	 *	it's [deadlock-] safe for it to continue.  The delete
	 *	thread won't be needed as qfs_iinactive() calls
	 *	qfs_delete() when logging is disabled.
	 * Freeze and drain reader ops.
	 *	Commit any outstanding reader transactions (lqfs_flush).
	 *	Set the ``unmounted'' bit in the qfstrans struct.
	 *	If debug, remove metadata from matamap.
	 *	Disable matamap processing.
	 *	NULL the trans ops table.
	 *	Free all of the incore structs related to logging.
	 * Allow reader ops.
	 */
#ifdef LUFS
	qfs_thread_suspend(&qfsvfsp->vfs_reclaim);
	qfs_thread_exit(&qfsvfsp->vfs_delete);
#else
	/* QFS doesn't have file reclaim or i-node delete threads. */
#endif /* LUFS */

	vfs_lock_wait(qfsvfsp->vfs_vfs);
#ifdef LQFS_TODO_LOCKFS
	ulp = &qfsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
	(void) qfs_quiesce(ulp);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

#ifdef LQFS_TODO
	(void) qfs_flush(qfsvfsp->vfs_vfs);
#else
	(void) lqfs_flush(qfsvfsp);
	if (LQFS_GET_LOGP(qfsvfsp)) {
		logmap_start_roll(LQFS_GET_LOGP(qfsvfsp));
	}
#endif /* LQFS_TODO */

	TRANS_MATA_UMOUNT(qfsvfsp);
	LQFS_SET_DOMATAMAP(qfsvfsp, 0);

	/*
	 * Free all of the incore structs
	 * Acquire the qfs_scan_lock before de-linking the mtm data
	 * structure so that we keep qfs_sync() and qfs_update() away
	 * when they execute their ufs_scan_inodes() runs while we are
	 * in the process of enabling/disabling logging.
	 */
	mutex_enter(&qfs_scan_lock);
	(void) lqfs_unsnarf(qfsvfsp);
	mutex_exit(&qfs_scan_lock);

#ifdef LQFS_TODO_LOCKFS
	atomic_add_long(&ufs_quiesce_pend, -1);
	mutex_exit(&ulp->ul_lock);
#else
	/* QFS doesn't do this yet. */
#endif /* LQFS_TODO_LOCKFS */
	vfs_setmntopt(qfsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
	vfs_unlock(qfsvfsp->vfs_vfs);

	LQFS_SET_FS_ROLLED(fs, FS_ALL_ROLLED);
	LQFS_SET_NOLOG_SI(qfsvfsp, 0);

	/*
	 * Free the log space and mark the superblock as FSACTIVE
	 */
	(void) lqfs_free(qfsvfsp);

#ifdef LUFS
	/*
	 * Allow the reclaim thread to continue.
	 */
	qfs_thread_continue(&qfsvfsp->vfs_reclaim);
#else
	/* QFS doesn't have a file reclaim thread. */
#endif /* LUFS */

#ifdef LQFS_TODO_LOCKFS
	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_ENOULOCK;
	}
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	error = 0;
	goto out;

errout:
#ifdef LQFS_LOCKFS
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) qfs_fiolfs(vp, &lf, 1);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_LOCKFS */

out:
	mutex_enter(&ip->mp->ms.m_waitwr_mutex);
	ip->mp->mt.fi_status |= FS_LOGSTATE_KNOWN;
	mutex_exit(&ip->mp->ms.m_waitwr_mutex);
	return (error);
}
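
lqfs_enable() and lqfs_disable() mirror the UFS logging routines, which userland reaches through the _FIOLOGENABLE/_FIOLOGDISABLE ioctls with a struct fiolog argument (sys/fs/ufs_filio.h). The sketch below shows that UFS-style invocation as an assumption about the call path; whether SAM-QFS exposes the same commands and header is not confirmed by the code above.

#include <sys/types.h>
#include <sys/filio.h>
#include <sys/fs/ufs_filio.h>	/* struct fiolog, FIOLOG_* codes */
#include <sys/ioctl.h>
#include <fcntl.h>
#include <stdio.h>
#include <unistd.h>

int
main(int argc, char **argv)
{
	struct fiolog fl = { 0 };
	int fd;

	if (argc != 2) {
		(void) fprintf(stderr, "usage: %s <mountpoint>\n", argv[0]);
		return (1);
	}
	if ((fd = open(argv[1], O_RDONLY)) == -1) {
		perror("open");
		return (1);
	}
	fl.nbytes_requested = 0;	/* 0: let the kernel pick a size */
	if (ioctl(fd, _FIOLOGENABLE, &fl) == -1)
		perror("_FIOLOGENABLE");
	else if (fl.error != FIOLOG_ENONE)
		(void) fprintf(stderr, "enable failed: flp->error = %d\n",
		    fl.error);
	(void) close(fd);
	return (0);
}
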