Example no. 1
0
/*
 * lqfs_snarf - read the on-disk log structures into core and validate
 * them, building the in-core log unit (ml_unit_t) for this mount.
 *
 * qfsvfsp - file system instance being mounted
 * fs      - current superblock; passed separately because during a
 *           remount the superblock hanging off qfsvfsp is stale
 * ronly   - non-zero for a read-only mount; a bad log is then
 *           tolerated (umount/fsck will repair it later) and log
 *           rolling is disabled via LDL_NOROLL
 *
 * Returns 0 on success; EIO, ENODEV or EDOM if the on-disk log
 * metadata cannot be read or fails validation.
 */
int
lqfs_snarf(qfsvfs_t *qfsvfsp, fs_lqfs_common_t *fs, int ronly)
{
	buf_t		*bp, *tbp;
	ml_unit_t	*ul;	/* in-core log unit being constructed */
	extent_block_t	*ebp;	/* on-disk log extent allocation block */
	ic_extent_block_t  *nebp;	/* in-core copy of the above */
	size_t		nb;	/* byte size of the in-core extent block */
	daddr_t		bno;	/* in disk blocks */
	int		ord;	/* device ordinal of the log's first extent */
	int		i;

	/* The on-disk log state must fit within a single sector. */
	/* LINTED: warning: logical expression always true: op "||" */
	ASSERT(sizeof (ml_odunit_t) < DEV_BSIZE);

	/*
	 * Get the allocation table
	 *	During a remount the superblock pointed to by the qfsvfsp
	 *	is out of date.  Hence the need for the ``new'' superblock
	 *	pointer, fs, passed in as a parameter.
	 */
	sam_bread_db(qfsvfsp, qfsvfsp->mi.m_fs[LQFS_GET_LOGORD(fs)].dev,
	    logbtodb(fs, LQFS_GET_LOGBNO(fs)), FS_BSIZE(fs), &bp);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		return (EIO);
	}
	/* Verify the extent block's checksum before trusting its contents. */
	ebp = (void *)bp->b_un.b_addr;
	if (!checksum(&ebp->chksum, (int32_t *)(void *)bp->b_un.b_addr,
	    FS_BSIZE(fs))) {
		brelse(bp);
		return (ENODEV);
	}

	/*
	 * It is possible to get log blocks with all zeros.
	 * We should also check for nextents to be zero in such case.
	 */
	if (ebp->type != LQFS_EXTENTS || ebp->nextents == 0) {
		brelse(bp);
		return (EDOM);
	}
	/*
	 * Put allocation into memory.  This requires conversion between
	 * the ondisk format of the extent (type extent_t) and the
	 * in-core format of the extent (type ic_extent_t).  The
	 * difference is the in-core form of the extent block stores
	 * the physical offset of the extent in disk blocks, which
	 * can require more than a 32-bit field.
	 */
	nb = (size_t)(sizeof (ic_extent_block_t) +
	    ((ebp->nextents - 1) * sizeof (ic_extent_t)));
	nebp = kmem_alloc(nb, KM_SLEEP);
	nebp->ic_nextents = ebp->nextents;
	nebp->ic_nbytes = ebp->nbytes;
	nebp->ic_nextbno = ebp->nextbno;
	nebp->ic_nextord = ebp->nextord;
	for (i = 0; i < ebp->nextents; i++) {
		nebp->ic_extents[i].ic_lbno = ebp->extents[i].lbno;
		nebp->ic_extents[i].ic_nbno = ebp->extents[i].nbno;
		/* Physical offset is converted to disk blocks (see above). */
		nebp->ic_extents[i].ic_pbno =
		    logbtodb(fs, ebp->extents[i].pbno);
		nebp->ic_extents[i].ic_ord = ebp->extents[i].ord;
	}
	brelse(bp);

	/*
	 * Get the log state
	 *	It lives in the first sector of the first log extent.
	 *	On a read error, retry with the next sector; the state
	 *	appears to be kept in duplicate sectors (see the double
	 *	bcopy() below) -- TODO confirm against the log layout.
	 */
	bno = nebp->ic_extents[0].ic_pbno;
	ord = nebp->ic_extents[0].ic_ord;
	sam_bread_db(qfsvfsp, qfsvfsp->mi.m_fs[ord].dev, bno, DEV_BSIZE, &bp);
	if (bp->b_flags & B_ERROR) {
		brelse(bp);
		sam_bread_db(qfsvfsp, qfsvfsp->mi.m_fs[ord].dev, bno + 1,
		    DEV_BSIZE, &bp);
		if (bp->b_flags & B_ERROR) {
			/* Both copies unreadable; give up. */
			brelse(bp);
			kmem_free(nebp, nb);
			return (EIO);
		}
	}

	/*
	 * Put ondisk struct into an anonymous buffer
	 *	This buffer will contain the memory for the ml_odunit struct.
	 *	The sector just read is copied into both halves of the
	 *	buffer, i.e. the log state is held in two identical sectors.
	 */
	tbp = ngeteblk(dbtob(LS_SECTORS));
	tbp->b_edev = bp->b_edev;
	tbp->b_dev = bp->b_dev;
	tbp->b_blkno = bno;
	bcopy(bp->b_un.b_addr, tbp->b_un.b_addr, DEV_BSIZE);
	bcopy(bp->b_un.b_addr, tbp->b_un.b_addr + DEV_BSIZE, DEV_BSIZE);
	/* The cached copy of the real device sector is no longer needed. */
	bp->b_flags |= (B_STALE | B_AGE);
	brelse(bp);
	bp = tbp;

	/*
	 * Verify the log state
	 *
	 * read/only mounts w/bad logs are allowed.  umount will
	 * eventually roll the bad log until the first IO error.
	 * fsck will then repair the file system.
	 *
	 * read/write mounts with bad logs are not allowed.
	 *
	 */
	ul = (ml_unit_t *)kmem_zalloc(sizeof (*ul), KM_SLEEP);
	bcopy(bp->b_un.b_addr, &ul->un_ondisk, sizeof (ml_odunit_t));
	if ((ul->un_chksum != ul->un_head_ident + ul->un_tail_ident) ||
	    (ul->un_version != LQFS_VERSION_LATEST) ||
	    (!ronly && ul->un_badlog)) {
		/* Invalid or bad log on a writable mount: clean up fully. */
		kmem_free(ul, sizeof (*ul));
		brelse(bp);
		kmem_free(nebp, nb);
		return (EIO);
	}
	/*
	 * Initialize the incore-only fields
	 */
	if (ronly) {
		/* Never roll the log into a read-only file system. */
		ul->un_flags |= LDL_NOROLL;
	}
	ul->un_bp = bp;
	ul->un_qfsvfs = qfsvfsp;
	ul->un_dev = qfsvfsp->mi.m_fs[ord].dev;
	ul->un_ebp = nebp;
	ul->un_nbeb = nb;
	ul->un_maxresv = btodb(ul->un_logsize) * LDL_USABLE_BSIZE;
	ul->un_deltamap = map_get(ul, deltamaptype, DELTAMAP_NHASH);
	ul->un_logmap = map_get(ul, logmaptype, LOGMAP_NHASH);
	if (ul->un_debug & MT_MATAMAP) {
		/* Debug-only metadata map for extra consistency checking. */
		ul->un_matamap = map_get(ul, matamaptype, DELTAMAP_NHASH);
	}
	sam_mutex_init(&ul->un_log_mutex, NULL, MUTEX_DEFAULT, NULL);
	sam_mutex_init(&ul->un_state_mutex, NULL, MUTEX_DEFAULT, NULL);

	/*
	 * Acquire the qfs_scan_lock before linking the mtm data
	 * structure so that we keep qfs_sync() and qfs_update() away
	 * when they execute the qfs_scan_inodes() run while we're in
	 * progress of enabling/disabling logging.
	 */

	mutex_enter(&qfs_scan_lock);
	LQFS_SET_LOGP(qfsvfsp, ul);
	ml_unit_validate(ul);

	/* remember the state of the log before the log scan */
	logmap_logscan(ul);
	mutex_exit(&qfs_scan_lock);

	/*
	 * Error during scan
	 *
	 * If this is a read/only mount; ignore the error.
	 * At a later time umount/fsck will repair the fs.
	 *
	 */
	if (ul->un_flags & LDL_ERROR) {
		if (!ronly) {
			/*
			 * Acquire the qfs_scan_lock before de-linking
			 * the mtm data structure so that we keep qfs_sync()
			 * and qfs_update() away when they execute the
			 * qfs_scan_inodes() run while we're in progress of
			 * enabling/disabling logging.
			 */
			mutex_enter(&qfs_scan_lock);
			lqfs_unsnarf(qfsvfsp);
			mutex_exit(&qfs_scan_lock);
			return (EIO);
		}
		/* Read-only mount: clear the error and carry on (above). */
		ul->un_flags &= ~LDL_ERROR;
	}
	if (!ronly) {
		/* Writable mount: start rolling log deltas. */
		logmap_start_roll(ul);
	}

	return (0);
}
Example no. 2
0
/*
 * ufs_fioffs - handle the FIOFFS ("file system flush") ioctl.
 *
 * Quiesces the file system, restarts deferred logging machinery
 * (LDL_NOROLL) and the reclaim thread where needed, then
 * synchronously flushes all dirty data and metadata.
 *
 * vp  - a vnode in the file system to flush
 * vap - must be NULL - reserved
 * cr  - credentials from ufs_ioctl (unused here)
 *
 * Returns 0 on success; EIO if the file system was forcibly
 * unmounted or is hard-locked, EBUSY if it is error-locked, or an
 * error from ufs_quiesce()/ufs_flush().
 */
/* ARGSUSED */
int
ufs_fioffs(
	struct vnode	*vp,
	char 		*vap,		/* must be NULL - reserved */
	struct cred	*cr)		/* credentials from ufs_ioctl */
{
	int error;
	struct ufsvfs	*ufsvfsp;
	struct ulockfs	*ulp;

	/* file system has been forcibly unmounted */
	ufsvfsp = VTOI(vp)->i_ufsvfs;
	if (ufsvfsp == NULL)
		return (EIO);

	ulp = &ufsvfsp->vfs_ulockfs;

	/*
	 * suspend the delete thread
	 *	this must be done outside the lockfs locking protocol
	 */
	vfs_lock_wait(vp->v_vfsp);
	ufs_thread_suspend(&ufsvfsp->vfs_delete);

	/* hold the mutex to prevent race with a lockfs request */
	mutex_enter(&ulp->ul_lock);
	atomic_inc_ulong(&ufs_quiesce_pend);

	if (ULOCKFS_IS_HLOCK(ulp)) {
		/* hard lock: the fs is unusable */
		error = EIO;
		goto out;
	}
	if (ULOCKFS_IS_ELOCK(ulp)) {
		/* error lock: fsck in progress */
		error = EBUSY;
		goto out;
	}
	/* wait for outstanding accesses to finish (assignment intended) */
	if (error = ufs_quiesce(ulp))
		goto out;

	/*
	 * If logging, and the logmap was marked as not rollable,
	 * make it rollable now, and start the trans_roll thread and
	 * the reclaim thread.  The log at this point is safe to write to.
	 */
	if (ufsvfsp->vfs_log) {
		ml_unit_t	*ul = ufsvfsp->vfs_log;
		struct fs	*fsp = ufsvfsp->vfs_fs;
		int		err;

		if (ul->un_flags & LDL_NOROLL) {
			ul->un_flags &= ~LDL_NOROLL;
			logmap_start_roll(ul);
			/*
			 * NOTE(review): the inner !fs_ronly test below is
			 * redundant -- this outer condition already
			 * requires !fs_ronly.
			 */
			if (!fsp->fs_ronly && (fsp->fs_reclaim &
			    (FS_RECLAIM|FS_RECLAIMING))) {
				fsp->fs_reclaim &= ~FS_RECLAIM;
				fsp->fs_reclaim |= FS_RECLAIMING;
				ufs_thread_start(&ufsvfsp->vfs_reclaim,
				    ufs_thread_reclaim, vp->v_vfsp);
				if (!fsp->fs_ronly) {
					/*
					 * Persist the new reclaim state;
					 * a failure is only reported, not
					 * treated as fatal.
					 */
					TRANS_SBWRITE(ufsvfsp,
					    TOP_SBUPDATE_UPDATE);
					if (err =
					    geterror(ufsvfsp->vfs_bufp)) {
						refstr_t	*mntpt;
						mntpt = vfs_getmntpoint(
						    vp->v_vfsp);
						cmn_err(CE_NOTE,
						    "Filesystem Flush "
						    "Failed to update "
						    "Reclaim Status for "
						    " %s, Write failed to "
						    "update superblock, "
						    "error %d",
						    refstr_value(mntpt),
						    err);
						refstr_rele(mntpt);
					}
				}
			}
		}
	}

	/* synchronously flush dirty data and metadata */
	error = ufs_flush(vp->v_vfsp);

out:
	/* undo the quiesce accounting and wake any lockfs waiters */
	atomic_dec_ulong(&ufs_quiesce_pend);
	cv_broadcast(&ulp->ul_cv);
	mutex_exit(&ulp->ul_lock);
	vfs_unlock(vp->v_vfsp);

	/*
	 * allow the delete thread to continue
	 */
	ufs_thread_continue(&ufsvfsp->vfs_delete);
	return (error);
}
Example no. 3
0
/*
 * Disable logging
 *
 * lqfs_disable - turn off journaling for the file system containing
 * vp: flush and roll the log, tear down the in-core logging
 * structures, free the on-disk log space, and record the new state
 * in the mount.
 *
 * vp  - a vnode in the target file system
 * flp - FIOLOG argument; flp->error additionally reports a FIOLOG_*
 *       status to the caller
 *
 * Returns 0 in most cases (including "logging already disabled");
 * non-zero only when an underlying operation fails.
 */
int
lqfs_disable(vnode_t *vp, struct fiolog *flp)
{
	int		error = 0;
	inode_t		*ip = VTOI(vp);
	qfsvfs_t	*qfsvfsp = ip->i_qfsvfs;
	fs_lqfs_common_t	*fs = VFS_FS_PTR(qfsvfsp);
#ifdef LUFS
	struct lockfs	lf;
	struct ulockfs	*ulp;
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LUFS */

	flp->error = FIOLOG_ENONE;

	/*
	 * Logging is already disabled; done
	 */
	if (LQFS_GET_LOGBNO(fs) == 0 || LQFS_GET_LOGP(qfsvfsp) == NULL ||
	    !LQFS_CAPABLE(qfsvfsp)) {
		/* Reflect the (already disabled) state in the mount opts. */
		vfs_setmntopt(qfsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
		error = 0;
		goto out;
	}

#ifdef LUFS
	/*
	 * File system must be write locked to disable logging
	 */
	error = qfs_fiolfss(vp, &lf);
	if (error) {
		goto out;
	}
	if (!LOCKFS_IS_ULOCK(&lf)) {
		flp->error = FIOLOG_EULOCK;
		error = 0;
		goto out;
	}
	lf.lf_lock = LOCKFS_WLOCK;
	lf.lf_flags = 0;
	lf.lf_comment = NULL;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_EWLOCK;
		error = 0;
		goto out;
	}
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LUFS */

	/* Re-check: logging may have vanished while (un)locking above. */
	if (LQFS_GET_LOGP(qfsvfsp) == NULL || LQFS_GET_LOGBNO(fs) == 0) {
		goto errout;
	}

	/*
	 * WE ARE COMMITTED TO DISABLING LOGGING PAST THIS POINT
	 */

	/*
	 * Disable logging:
	 * Suspend the reclaim thread and force the delete thread to exit.
	 *	When a nologging mount has completed there may still be
	 *	work for reclaim to do so just suspend this thread until
	 *	it's [deadlock-] safe for it to continue.  The delete
	 *	thread won't be needed as qfs_iinactive() calls
	 *	qfs_delete() when logging is disabled.
	 * Freeze and drain reader ops.
	 *	Commit any outstanding reader transactions (lqfs_flush).
	 *	Set the ``unmounted'' bit in the qfstrans struct.
	 *	If debug, remove metadata from matamap.
	 *	Disable matamap processing.
	 *	NULL the trans ops table.
	 *	Free all of the incore structs related to logging.
	 * Allow reader ops.
	 */
#ifdef LUFS
	qfs_thread_suspend(&qfsvfsp->vfs_reclaim);
	qfs_thread_exit(&qfsvfsp->vfs_delete);
#else
	/* QFS doesn't have file reclaim nor i-node delete threads. */
#endif /* LUFS */

	vfs_lock_wait(qfsvfsp->vfs_vfs);
#ifdef LQFS_TODO_LOCKFS
	ulp = &qfsvfsp->vfs_ulockfs;
	mutex_enter(&ulp->ul_lock);
	(void) qfs_quiesce(ulp);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

#ifdef LQFS_TODO
	(void) qfs_flush(qfsvfsp->vfs_vfs);
#else
	/* Flush everything, then roll any remaining log deltas home. */
	(void) lqfs_flush(qfsvfsp);
	if (LQFS_GET_LOGP(qfsvfsp)) {
		logmap_start_roll(LQFS_GET_LOGP(qfsvfsp));
	}
#endif /* LQFS_TODO */

	/* Disable debug metadata tracking. */
	TRANS_MATA_UMOUNT(qfsvfsp);
	LQFS_SET_DOMATAMAP(qfsvfsp, 0);

	/*
	 * Free all of the incore structs
	 * Acquire the ufs_scan_lock before de-linking the mtm data
	 * structure so that we keep ufs_sync() and ufs_update() away
	 * when they execute the ufs_scan_inodes() run while we're in
	 * progress of enabling/disabling logging.
	 */
	mutex_enter(&qfs_scan_lock);
	(void) lqfs_unsnarf(qfsvfsp);
	mutex_exit(&qfs_scan_lock);

#ifdef LQFS_TODO_LOCKFS
	atomic_add_long(&ufs_quiesce_pend, -1);
	mutex_exit(&ulp->ul_lock);
#else
	/* QFS doesn't do this yet. */
#endif /* LQFS_TODO_LOCKFS */
	vfs_setmntopt(qfsvfsp->vfs_vfs, MNTOPT_NOLOGGING, NULL, 0);
	vfs_unlock(qfsvfsp->vfs_vfs);

	/* The log has been fully rolled; note that in the superblock. */
	LQFS_SET_FS_ROLLED(fs, FS_ALL_ROLLED);
	LQFS_SET_NOLOG_SI(qfsvfsp, 0);

	/*
	 * Free the log space and mark the superblock as FSACTIVE
	 */
	(void) lqfs_free(qfsvfsp);

#ifdef LUFS
	/*
	 * Allow the reclaim thread to continue.
	 */
	qfs_thread_continue(&qfsvfsp->vfs_reclaim);
#else
	/* QFS doesn't have a file reclaim thread. */
#endif /* LUFS */

#ifdef LQFS_TODO_LOCKFS
	/*
	 * Unlock the file system
	 */
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	error = qfs_fiolfs(vp, &lf, 1);
	if (error) {
		flp->error = FIOLOG_ENOULOCK;
	}
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_TODO_LOCKFS */

	error = 0;
	goto out;

errout:
	/*
	 * NOTE(review): this block is guarded by LQFS_LOCKFS while the
	 * unlock path above uses LQFS_TODO_LOCKFS -- confirm which macro
	 * name is intended; as written the two can't be enabled together
	 * by defining a single symbol.
	 */
#ifdef LQFS_LOCKFS
	lf.lf_lock = LOCKFS_ULOCK;
	lf.lf_flags = 0;
	(void) qfs_fiolfs(vp, &lf, 1);
#else
	/* QFS doesn't really support LOCKFS. */
#endif /* LQFS_LOCKFS */

out:
	/* Record that the logging state of this mount is now known. */
	mutex_enter(&ip->mp->ms.m_waitwr_mutex);
	ip->mp->mt.fi_status |= FS_LOGSTATE_KNOWN;
	mutex_exit(&ip->mp->ms.m_waitwr_mutex);
	return (error);
}