/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas. We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times. Use the MS_ACTIVE flag to avoid doing anything
	 * during mount. Doing work during unmount is avoided by calling
	 * cancel_delayed_work_sync on this work queue before tearing down
	 * the ail and the log in xfs_log_unmount.
	 */
	if (!(mp->m_super->s_flags & MS_ACTIVE) &&
	    !(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_writers.frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		/* start pushing all the metadata that is currently
		 * dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
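/*
 * For reference: the comment above points at cancel_delayed_work_sync()
 * being run before the AIL and the log are torn down. A sketch of what
 * that stop routine looked like in the same era of fs/xfs/xfs_sync.c,
 * quoted from memory rather than from this excerpt, so treat the exact
 * body as assumed:
 */
void
xfs_syncd_stop(
	struct xfs_mount	*mp)
{
	cancel_delayed_work_sync(&mp->m_sync_work);
	cancel_delayed_work_sync(&mp->m_reclaim_work);
	cancel_work_sync(&mp->m_flush_work);
}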
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas. We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	/*
	 * We shouldn't write/force the log if we are in the mount/unmount
	 * process or on a read only filesystem. The workqueue still needs to be
	 * active in both cases, however, because it is used for inode reclaim
	 * during these times. Use the s_umount semaphore to provide exclusion
	 * with unmount.
	 */
	if (down_read_trylock(&mp->m_super->s_umount)) {
		if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
			/* dgc: errors ignored here */
			if (mp->m_super->s_frozen == SB_UNFROZEN &&
			    xfs_log_need_covered(mp))
				error = xfs_fs_log_dummy(mp);
			else
				xfs_log_force(mp, 0);

			/* start pushing all the metadata that is currently
			 * dirty */
			xfs_ail_push_all(mp->m_ail);
		}
		up_read(&mp->m_super->s_umount);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}
int
xfs_syncd_init(
	struct xfs_mount	*mp)
{
	INIT_WORK(&mp->m_flush_work, xfs_flush_worker);
	INIT_DELAYED_WORK(&mp->m_sync_work, xfs_sync_worker);
	INIT_DELAYED_WORK(&mp->m_reclaim_work, xfs_reclaim_worker);

	xfs_syncd_queue_sync(mp);

	return 0;
}
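/*
 * A sketch of the re-queue helper that xfs_syncd_init() and each worker
 * call; it is not part of this excerpt. It assumes the era's xfs_syncd_wq
 * workqueue and the xfs_syncd_centisecs sysctl (the sync interval in
 * centiseconds), rescheduling the delayed work after that interval:
 */
static void
xfs_syncd_queue_sync(
	struct xfs_mount	*mp)
{
	queue_delayed_work(xfs_syncd_wq, &mp->m_sync_work,
				msecs_to_jiffies(xfs_syncd_centisecs * 10));
}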
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);

		xfs_ail_push_all(mp->m_ail);
	}

	xfs_syncd_queue_sync(mp);
}
/*
 * Every sync period we need to unpin all items, reclaim inodes and sync
 * disk quotas. We might need to cover the log to indicate that the
 * filesystem is idle and not frozen.
 */
STATIC void
xfs_sync_worker(
	struct work_struct *work)
{
	struct xfs_mount *mp = container_of(to_delayed_work(work),
					struct xfs_mount, m_sync_work);
	int		error;

	if (!(mp->m_flags & XFS_MOUNT_RDONLY)) {
		/* dgc: errors ignored here */
		if (mp->m_super->s_frozen == SB_UNFROZEN &&
		    xfs_log_need_covered(mp))
			error = xfs_fs_log_dummy(mp);
		else
			xfs_log_force(mp, 0);
		error = xfs_qm_sync(mp, SYNC_TRYLOCK);

		/* start pushing all the metadata that is currently dirty */
		xfs_ail_push_all(mp->m_ail);
	}

	/* queue us up again */
	xfs_syncd_queue_sync(mp);
}