/*
 * When remounting a filesystem read-only or freezing the filesystem, we have
 * two phases to execute. The first phase is syncing the data before we
 * quiesce the filesystem, and the second is flushing all the inodes out after
 * we've waited for all the transactions created by the first phase to
 * complete. The second phase ensures that the inodes are written to their
 * location on disk rather than just existing in transactions in the log. This
 * means after a quiesce there is no log replay required to write the inodes to
 * disk (this is the main difference between a sync and a quiesce).
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* force out the log */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		xfs_flush_buftarg(mp->m_rtdev_targp, 1);

	return error ? error : error2;
}
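/*
 * Context sketch (not from this source): how the two phases described in
 * the comment above might be driven back to back from the freeze path.
 * The function name is hypothetical; xfs_quiesce_attr is this era's
 * second-phase, inode-flushing counterpart, but the exact VFS wiring
 * differs, so read this as an illustration of the ordering only.
 */
STATIC int
xfs_freeze_sketch(
	struct super_block	*sb)
{
	struct xfs_mount	*mp = XFS_M(sb);
	int			error;

	/* phase one: push all dirty data to its final location on disk */
	error = xfs_quiesce_data(mp);
	if (error)
		return -error;	/* XFS uses positive errnos internally */

	/* phase two: flush the inodes so no log replay is needed */
	xfs_quiesce_attr(mp);
	return 0;
}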
Example #2
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read-only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times.  Use the MS_ACTIVE flag to avoid doing anything
	 * during mount.  Doing work during unmount is avoided by calling
	 * cancel_delayed_work_sync on this work queue before tearing down
	 * the ail and the log in xfs_log_unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		/* start pushing all the metadata that is currently
		 * dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
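/*
 * The requeue above relies on a small helper that is not shown in this
 * example. A sketch of xfs_syncd_queue_sync from this era, assuming the
 * xfs_syncd_wq workqueue and the xfs_syncd_centisecs sysctl tunable:
 */
static void
xfs_syncd_queue_sync(
	struct xfs_mount	*mp)
{
	/* re-arm the periodic sync work; the tunable is in centiseconds */
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
			   msecs_to_jiffies(xfs_syncd_centisecs * 10));
}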
Example #3
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	xfs_qm_sync(mp, SYNC_TRYLOCK);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* force out the newly dirtied log buffers */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
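/*
 * xfs_sync_fsdata is called but not shown in these examples. A sketch of
 * its contemporary shape: write the superblock buffer, forcing the log
 * first if the buffer is pinned so the write cannot stall waiting on a
 * log flush. The helpers (xfs_getsb, XFS_BUF_ISPINNED, the two-argument
 * xfs_bwrite) are assumptions based on XFS code of this era.
 */
STATIC int
xfs_sync_fsdata(
	struct xfs_mount	*mp)
{
	struct xfs_buf		*bp;

	bp = xfs_getsb(mp, 0);
	if (XFS_BUF_ISPINNED(bp))
		xfs_log_force(mp, 0);

	/* writing the superblock also surfaces any shutdown errors */
	return xfs_bwrite(mp, bp);
}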
Example #4
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);
	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
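/*
 * The two-pass pattern above is driven by sync flags along these lines
 * (values are a sketch of the era's xfs_sync.h, not copied from this
 * source). The non-blocking TRYLOCK pass starts as much I/O as possible
 * without stalling; the WAIT pass then only has to wait for completion,
 * so the two passes together cost roughly one round of I/O latency.
 */
#define SYNC_WAIT	0x0001	/* wait for i/o to complete */
#define SYNC_TRYLOCK	0x0002	/* only try to lock inodes */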
Example #5
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read-only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times.  Use the s_umount semaphore to provide exclusion
	 * with unmount.
	 */
	if (down_read_trylock(&mp->m_super->s_umount)) {
		if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
			/* dgc: errors ignored here */
			if (mp->m_super->s_frozen == SB_UNFROZEN &&
			    xfs_log_need_covered(mp))
				error = xfs_fs_log_dummy(mp);
			else
				xfs_log_force(mp, 0);

			/* start pushing all the metadata that is currently
			 * dirty */
			xfs_ail_push_all(mp->m_ail);
		}
		up_read(&mp->m_super->s_umount);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
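/*
 * The trylock above works because unmount holds the same semaphore for
 * write. A simplified sketch of the teardown side (an assumption modeled
 * on the VFS's generic_shutdown_super, with a hypothetical name): while
 * s_umount is write-held, down_read_trylock() in xfs_sync_worker() fails
 * and the worker skips straight to requeueing itself.
 */
static void
xfs_shutdown_sketch(
	struct super_block	*sb)
{
	down_write(&sb->s_umount);
	/* ... tear down the AIL and the log ... */
	up_write(&sb->s_umount);
}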
Example #6
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother emptying the AIL
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* force out the log */
	xfs_log_force(mp, XFS_LOG_SYNC);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	return error ? error : error2;
}
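/*
 * xfs_fs_log_dummy covers the log by committing a small synchronous
 * transaction that changes nothing visible. A sketch of the contemporary
 * implementation; the transaction type, reservation size and the choice
 * of the UUID as the logged field are assumptions from adjacent kernel
 * code of this era.
 */
int
xfs_fs_log_dummy(
	struct xfs_mount	*mp)
{
	struct xfs_trans	*tp;
	int			error;

	tp = _xfs_trans_alloc(mp, XFS_TRANS_DUMMY1, KM_SLEEP);
	error = xfs_trans_reserve(tp, 0, mp->m_sb.sb_sectsize + 128,
				  0, 0, XFS_DEFAULT_LOG_COUNT);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	/* log the superblock UUID, an otherwise immutable field */
	xfs_mod_sb(tp, XFS_SB_UUID);
	xfs_trans_set_sync(tp);
	return xfs_trans_commit(tp, 0);
}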
Example #7
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point. Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);

	/*
	 * Log all pending size and timestamp updates.  The vfs writeback
 * code is supposed to do this, but due to its overaggressive
	 * livelock detection it will skip inodes where appending writes
	 * were written out in the first non-blocking sync phase if their
	 * completion took long enough that it happened after taking the
	 * timestamp for the cut-off in the blocking phase.
	 */
	xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);

	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	return error ? error : error2;
}
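/*
 * The xfs_log_dirty_inode callback handed to the AG iterator above is
 * not shown in this example. A sketch of what it does: commit a small
 * transaction logging the inode core if it still carries unlogged
 * size/timestamp updates. Helper names and the reservation macro follow
 * the 3.2-era code and should be read as assumptions.
 */
STATIC int
xfs_log_dirty_inode(
	struct xfs_inode	*ip,
	struct xfs_perag	*pag,
	int			flags)
{
	struct xfs_mount	*mp = ip->i_mount;
	struct xfs_trans	*tp;
	int			error;

	/* nothing to do if the inode core has no unlogged updates */
	if (!ip->i_update_core)
		return 0;

	tp = xfs_trans_alloc(mp, XFS_TRANS_FSYNC_TS);
	error = xfs_trans_reserve(tp, 0, XFS_FSYNC_TS_LOG_RES(mp), 0, 0, 0);
	if (error) {
		xfs_trans_cancel(tp, 0);
		return error;
	}

	xfs_ilock(ip, XFS_ILOCK_EXCL);
	xfs_trans_ijoin(tp, ip, XFS_ILOCK_EXCL);
	xfs_trans_log_inode(tp, ip, XFS_ILOG_CORE);
	return xfs_trans_commit(tp, 0);
}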
Example #8
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
Example #9
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas.  We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
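/*
 * xfs_ail_push_all, used throughout these examples, simply pushes the
 * AIL up to the highest LSN currently in it. A sketch matching the
 * 2.6.39-era implementation; xfs_ail_max_lsn is assumed from the same
 * file.
 */
void
xfs_ail_push_all(
	struct xfs_ail	*ailp)
{
	xfs_lsn_t	threshold_lsn = xfs_ail_max_lsn(ailp);

	/* a zero LSN means the AIL is empty and there is nothing to push */
	if (threshold_lsn)
		xfs_ail_push(ailp, threshold_lsn);
}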