/* * When remounting a filesystem read-only or freezing the filesystem, we have * two phases to execute. This first phase is syncing the data before we * quiesce the filesystem, and the second is flushing all the inodes out after * we've waited for all the transactions created by the first phase to * complete. The second phase ensures that the inodes are written to their * location on disk rather than just existing in transactions in the log. This * means after a quiesce there is no log replay required to write the inodes to * disk (this is the main difference between a sync and a quiesce). */ int xfs_quiesce_data( struct xfs_mount *mp) { int error, error2 = 0; xfs_log_force(mp, XFS_LOG_SYNC); error = xfs_sync_fsdata(mp); /* make sure all delwri buffers are written out */ xfs_flush_buftarg(mp->m_ddev_targp, 1); if (xfs_log_need_covered(mp)) error2 = xfs_fs_log_dummy(mp); if (mp->m_rtdev_targp) xfs_flush_buftarg(mp->m_rtdev_targp, 1); return error ? error : error2; }
/*
 * Flush and reclaim all metadata so a clean unmount record can follow.
 *
 * The loop below must run at least twice: the first pass flushes most
 * metadata but in doing so generates more (typically directory
 * updates), which must itself be flushed and logged before the unmount
 * record can be written.  Synchronous inode reclaim inside the loop
 * catches anything the initial delwri flush skipped.
 */
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int			passes = 0;
	int			pinned;

	/* initial non-blocking reclaim and delwri push */
	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pinned = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pinned) {
			/* nothing pinned: let the log catch up, count a pass */
			delay(50);
			passes++;
		}
	} while (passes < 2);
}
/* * First stage of freeze - no writers will make progress now we are here, * so we flush delwri and delalloc buffers here, then wait for all I/O to * complete. Data is frozen at that point. Metadata is not frozen, * transactions can still occur here so don't bother flushing the buftarg * because it'll just get dirty again. */ int xfs_quiesce_data( struct xfs_mount *mp) { int error, error2 = 0; xfs_qm_sync(mp, SYNC_TRYLOCK); xfs_qm_sync(mp, SYNC_WAIT); /* force out the newly dirtied log buffers */ xfs_log_force(mp, XFS_LOG_SYNC); /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp); /* make sure all delwri buffers are written out */ xfs_flush_buftarg(mp->m_ddev_targp, 1); /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) xfs_flush_buftarg(mp->m_rtdev_targp, 1); return error ? error : error2; }
/*
 * Apply updated mount options at remount time.  Handles the noatime
 * flag, and on a transition to read-only quiesces the filesystem and
 * writes an unmount record so no log recovery is needed afterwards.
 *
 * Always returns 0; sync errors reported through VFS_SYNC are not
 * propagated to the caller.
 */
STATIC int
xfs_mntupdate(
	bhv_desc_t		*bdp,
	int			*flags,
	struct xfs_mount_args	*args)
{
	struct vfs	*vfsp = bhvtovfs(bdp);
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
	int		pincount, error;
	int		count = 0;

	/* pick up the new atime behaviour from the mount arguments */
	if (args->flags & XFSMNT_NOATIME)
		mp->m_flags |= XFS_MOUNT_NOATIME;
	else
		mp->m_flags &= ~XFS_MOUNT_NOATIME;

	/*
	 * Sync out dirty data while we are still writable.
	 * NOTE(review): VFS_SYNC appears to be a macro that stores its
	 * result in 'error'; the value is not checked here, presumably a
	 * deliberate best-effort sync — confirm against the macro
	 * definition.
	 */
	if (!(vfsp->vfs_flag & VFS_RDONLY)) {
		VFS_SYNC(vfsp, SYNC_FSDATA|SYNC_BDFLUSH|SYNC_ATTR, NULL, error);
	}

	if (*flags & MS_RDONLY) {
		/* going read-only: drop cached refs, flush, reclaim inodes */
		xfs_refcache_purge_mp(mp);
		xfs_flush_buftarg(mp->m_ddev_targp, 0);
		xfs_finish_reclaim_all(mp, 0);

		/* This loop must run at least twice.
		 * The first instance of the loop will flush
		 * most meta data but that will generate more
		 * meta data (typically directory updates).
		 * Which then must be flushed and logged before
		 * we can write the unmount record.
		 */
		do {
			VFS_SYNC(vfsp, REMOUNT_READONLY_FLAGS, NULL, error);
			pincount = xfs_flush_buftarg(mp->m_ddev_targp, 1);
			if (!pincount) {
				/* nothing pinned: let the log drain, count a pass */
				delay(50);
				count++;
			}
		} while (count < 2);

		/* Ok now write out an unmount record */
		xfs_log_unmount_write(mp);
		xfs_unmountfs_writesb(mp);
		vfsp->vfs_flag |= VFS_RDONLY;
	} else {
		/* going (or staying) read-write */
		vfsp->vfs_flag &= ~VFS_RDONLY;
	}

	return 0;
}
/* * First stage of freeze - no writers will make progress now we are here, * so we flush delwri and delalloc buffers here, then wait for all I/O to * complete. Data is frozen at that point. Metadata is not frozen, * transactions can still occur here so don't bother flushing the buftarg * because it'll just get dirty again. */ int xfs_quiesce_data( struct xfs_mount *mp) { int error, error2 = 0; /* push non-blocking */ xfs_sync_data(mp, 0); xfs_qm_sync(mp, SYNC_TRYLOCK); /* push and block till complete */ xfs_sync_data(mp, SYNC_WAIT); xfs_qm_sync(mp, SYNC_WAIT); /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp); /* make sure all delwri buffers are written out */ xfs_flush_buftarg(mp->m_ddev_targp, 1); /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) XFS_bflush(mp->m_rtdev_targp); return error ? error : error2; }
/*
 * Tear down a buffer target: push out any remaining delwri buffers,
 * then release the target structure itself.
 */
void
xfs_free_buftarg(
	xfs_buftarg_t	*btp)
{
	/*
	 * NOTE(review): in this variant xfs_flush_buftarg takes no wait
	 * argument; whether it blocks until I/O completion before the
	 * structure is freed depends on its definition — confirm.
	 */
	xfs_flush_buftarg(btp);
	kmem_free(btp, sizeof(*btp));
}
/*
 * Tear down a buffer target: push out any remaining delwri buffers,
 * then release the target structure itself.
 *
 * NOTE(review): the 'external' parameter is unused in this snippet —
 * presumably it distinguishes externally-owned block devices elsewhere;
 * verify against callers.
 */
void
xfs_free_buftarg(
	xfs_buftarg_t	*btp,
	int		external)
{
	/*
	 * Non-blocking flush (wait == 0) immediately before freeing —
	 * NOTE(review): looks like in-flight I/O could still reference
	 * btp after kmem_free; confirm xfs_flush_buftarg's semantics.
	 */
	xfs_flush_buftarg(btp, /* wait */ 0);
	kmem_free(btp, sizeof(*btp));
}
/*
 * Flush and reclaim all metadata in preparation for writing a clean
 * unmount record.  The do/while loop runs at least twice because the
 * first pass generates further metadata (e.g. directory updates) that
 * must also be flushed and logged; synchronous inode reclaim catches
 * anything the initial non-blocking delwri flush skipped.
 */
STATIC void
xfs_quiesce_fs(
	struct xfs_mount	*mp)
{
	int			flush_passes = 0;
	int			pinned_bufs;

	/* kick off non-blocking reclaim and an async delwri push */
	xfs_reclaim_inodes(mp, 0);
	xfs_flush_buftarg(mp->m_ddev_targp, 0);

	do {
		xfs_reclaim_inodes(mp, SYNC_WAIT);
		xfs_sync_attr(mp, SYNC_WAIT);
		pinned_bufs = xfs_flush_buftarg(mp->m_ddev_targp, 1);
		if (!pinned_bufs) {
			/* quiet pass: give the log time, count it */
			delay(50);
			flush_passes++;
		}
	} while (flush_passes < 2);
}
/* * First stage of freeze - no writers will make progress now we are here, * so we flush delwri and delalloc buffers here, then wait for all I/O to * complete. Data is frozen at that point. Metadata is not frozen, * transactions can still occur here so don't bother flushing the buftarg * because it'll just get dirty again. */ int xfs_quiesce_data( struct xfs_mount *mp) { int error, error2 = 0; /* push non-blocking */ xfs_sync_data(mp, 0); xfs_qm_sync(mp, SYNC_TRYLOCK); /* push and block till complete */ xfs_sync_data(mp, SYNC_WAIT); /* * Log all pending size and timestamp updates. The vfs writeback * code is supposed to do this, but due to its overagressive * livelock detection it will skip inodes where appending writes * were written out in the first non-blocking sync phase if their * completion took long enough that it happened after taking the * timestamp for the cut-off in the blocking phase. */ xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0); xfs_qm_sync(mp, SYNC_WAIT); /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp); /* make sure all delwri buffers are written out */ xfs_flush_buftarg(mp->m_ddev_targp, 1); /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) XFS_bflush(mp->m_rtdev_targp); return error ? error : error2; }