/* * First stage of freeze - no writers will make progress now we are here, * so we flush delwri and delalloc buffers here, then wait for all I/O to * complete. Data is frozen at that point. Metadata is not frozen, * transactions can still occur here so don't bother flushing the buftarg * because it'll just get dirty again. */ int xfs_quiesce_data( struct xfs_mount *mp) { int error, error2 = 0; /* push non-blocking */ xfs_sync_data(mp, 0); xfs_qm_sync(mp, SYNC_TRYLOCK); /* push and block till complete */ xfs_sync_data(mp, SYNC_WAIT); xfs_qm_sync(mp, SYNC_WAIT); /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp); /* make sure all delwri buffers are written out */ xfs_flush_buftarg(mp->m_ddev_targp, 1); /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) XFS_bflush(mp->m_rtdev_targp); return error ? error : error2; }
/* * First stage of freeze - no writers will make progress now we are here, * so we flush delwri and delalloc buffers here, then wait for all I/O to * complete. Data is frozen at that point. Metadata is not frozen, * transactions can still occur here so don't bother flushing the buftarg * because it'll just get dirty again. */ int xfs_quiesce_data( struct xfs_mount *mp) { int error, error2 = 0; xfs_qm_sync(mp, SYNC_TRYLOCK); xfs_qm_sync(mp, SYNC_WAIT); /* force out the newly dirtied log buffers */ xfs_log_force(mp, XFS_LOG_SYNC); /* write superblock and hoover up shutdown errors */ error = xfs_sync_fsdata(mp); /* make sure all delwri buffers are written out */ xfs_flush_buftarg(mp->m_ddev_targp, 1); /* mark the log as covered if needed */ if (xfs_log_need_covered(mp)) error2 = xfs_fs_log_dummy(mp); /* flush data-only devices */ if (mp->m_rtdev_targp) XFS_bflush(mp->m_rtdev_targp); return error ? error : error2; }
/*
 * First stage of freeze - no writers will make progress now we are here,
 * so we flush delwri and delalloc buffers here, then wait for all I/O to
 * complete.  Data is frozen at that point.  Metadata is not frozen,
 * transactions can still occur here so don't bother flushing the buftarg
 * because it'll just get dirty again.
 *
 * NOTE(review): despite the comment above, xfs_flush_buftarg() *is*
 * called below on the data device - confirm whether the comment or the
 * call is the stale part.
 *
 * Returns the superblock sync error if any, otherwise the result of
 * covering the log (error2).
 */
int
xfs_quiesce_data(
	struct xfs_mount	*mp)
{
	int			error, error2 = 0;

	/* push non-blocking */
	xfs_sync_data(mp, 0);
	xfs_qm_sync(mp, SYNC_TRYLOCK);

	/* push and block till complete */
	xfs_sync_data(mp, SYNC_WAIT);

	/*
	 * Log all pending size and timestamp updates.  The vfs writeback
	 * code is supposed to do this, but due to its overagressive
	 * livelock detection it will skip inodes where appending writes
	 * were written out in the first non-blocking sync phase if their
	 * completion took long enough that it happened after taking the
	 * timestamp for the cut-off in the blocking phase.
	 */
	xfs_inode_ag_iterator(mp, xfs_log_dirty_inode, 0);

	xfs_qm_sync(mp, SYNC_WAIT);

	/* write superblock and hoover up shutdown errors */
	error = xfs_sync_fsdata(mp);

	/* make sure all delwri buffers are written out */
	xfs_flush_buftarg(mp->m_ddev_targp, 1);

	/* mark the log as covered if needed */
	if (xfs_log_need_covered(mp))
		error2 = xfs_fs_log_dummy(mp);

	/* flush data-only devices */
	if (mp->m_rtdev_targp)
		XFS_bflush(mp->m_rtdev_targp);

	/* the fsdata sync error takes precedence over the log-cover error */
	return error ? error : error2;
}
/*
 * PRIVATE, debugging.
 *
 * In-kernel quota consistency check: walk every inode in the filesystem
 * via bulkstat, accumulate per-dquot usage into the qmtest_udqtab /
 * qmtest_gdqtab shadow hash tables, then compare each shadow entry
 * against the live dquots (xfs_dqtest_cmp) and report mismatches.
 *
 * Uses module-global state (qmtest_hashmask, qmtest_nfails, the two
 * shadow tables) serialized by qcheck_lock; there must be no quota
 * activity while it runs.
 *
 * Returns ESRCH if quota is not enabled, otherwise the number of
 * comparison failures (0 == consistent).
 */
int
xfs_qm_internalqcheck(
	xfs_mount_t	*mp)
{
	xfs_ino_t	lastino;	/* bulkstat cursor */
	int		done, count;
	int		i;
	xfs_dqtest_t	*d, *e;
	xfs_dqhash_t	*h1;
	int		error;

	lastino = 0;
	qmtest_hashmask = 32;
	count = 5;		/* inodes per bulkstat batch */
	done = 0;
	qmtest_nfails = 0;

	if (! XFS_IS_QUOTA_ON(mp))
		return XFS_ERROR(ESRCH);

	/*
	 * Force the log and flush the data device so the on-disk state is
	 * stable before we start counting.  NOTE(review): this is done
	 * twice in a row - presumably to catch buffers dirtied during the
	 * first force/flush cycle; confirm the intent.
	 */
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	XFS_bflush(mp->m_ddev_targp);
	xfs_log_force(mp, (xfs_lsn_t)0, XFS_LOG_FORCE | XFS_LOG_SYNC);
	XFS_bflush(mp->m_ddev_targp);

	mutex_lock(&qcheck_lock);
	/* There should be absolutely no quota activity while this
	   is going on. */
	qmtest_udqtab = kmem_zalloc(qmtest_hashmask *
				    sizeof(xfs_dqhash_t), KM_SLEEP);
	qmtest_gdqtab = kmem_zalloc(qmtest_hashmask *
				    sizeof(xfs_dqhash_t), KM_SLEEP);
	do {
		/*
		 * Iterate thru all the inodes in the file system,
		 * adjusting the corresponding dquot counters
		 */
		if ((error = xfs_bulkstat(mp, &lastino, &count,
				xfs_qm_internalqcheck_adjust, NULL,
				0, NULL, BULKSTAT_FG_IGET, &done))) {
			break;
		}
	} while (! done);
	if (error) {
		cmn_err(CE_DEBUG, "Bulkstat returned error 0x%x", error);
	}
	cmn_err(CE_DEBUG, "Checking results against system dquots");
	/*
	 * Compare every shadow dquot against the live one, freeing each
	 * hash-chain entry as we go (user table first, then group table).
	 */
	for (i = 0; i < qmtest_hashmask; i++) {
		h1 = &qmtest_udqtab[i];
		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
			xfs_dqtest_cmp(d);
			/* grab the next link before freeing this node */
			e = (xfs_dqtest_t *) d->HL_NEXT;
			kmem_free(d, sizeof(xfs_dqtest_t));
			d = e;
		}
		h1 = &qmtest_gdqtab[i];
		for (d = (xfs_dqtest_t *) h1->qh_next; d != NULL; ) {
			xfs_dqtest_cmp(d);
			e = (xfs_dqtest_t *) d->HL_NEXT;
			kmem_free(d, sizeof(xfs_dqtest_t));
			d = e;
		}
	}

	/* qmtest_nfails was bumped by xfs_dqtest_cmp for each mismatch */
	if (qmtest_nfails) {
		cmn_err(CE_DEBUG, "******** quotacheck failed ********");
		cmn_err(CE_DEBUG, "failures = %d", qmtest_nfails);
	} else {
		cmn_err(CE_DEBUG, "******** quotacheck successful! ********");
	}
	kmem_free(qmtest_udqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
	kmem_free(qmtest_gdqtab, qmtest_hashmask * sizeof(xfs_dqhash_t));
	mutex_unlock(&qcheck_lock);
	return (qmtest_nfails);
}
/*
 * Unmount entry point for the behavior-chain VFS layer.
 *
 * Sends the DMAPI pre-unmount event (when VFS_DMI is set), purges the
 * inode reference cache, flushes the data device, drops the root vnode
 * reference, and finally calls xfs_unmountfs() to tear down the mount.
 *
 * Ordering constraint: the DMAPI unmount event must be sent before
 * xfs_unmountfs() frees the mount structure - see the out: label below.
 *
 * Returns 0 on success or a positive XFS error code; note that a
 * pre-unmount event failure aborts the unmount entirely.
 */
STATIC int
xfs_unmount(
	bhv_desc_t	*bdp,
	int		flags,
	cred_t		*credp)
{
	struct vfs	*vfsp = bhvtovfs(bdp);
	xfs_mount_t	*mp = XFS_BHVTOM(bdp);
	xfs_inode_t	*rip;
	vnode_t		*rvp;
	int		unmount_event_wanted = 0;
	int		unmount_event_flags = 0;
	int		xfs_unmountfs_needed = 0;
	int		error;

	rip = mp->m_rootip;
	rvp = XFS_ITOV(rip);

	if (vfsp->vfs_flag & VFS_DMI) {
		/*
		 * Ask DMAPI for permission to unmount; a failure here
		 * vetoes the unmount.  The UNWANTED flag is set when the
		 * event mask says nobody is listening for this event.
		 */
		error = XFS_SEND_PREUNMOUNT(mp, vfsp,
				rvp, DM_RIGHT_NULL, rvp, DM_RIGHT_NULL,
				NULL, NULL, 0, 0,
				(mp->m_dmevmask & (1<<DM_EVENT_PREUNMOUNT))?
					0:DM_FLAGS_UNWANTED);
		if (error)
			return XFS_ERROR(error);
		unmount_event_wanted = 1;
		unmount_event_flags = (mp->m_dmevmask & (1<<DM_EVENT_UNMOUNT))?
					0 : DM_FLAGS_UNWANTED;
	}

	/*
	 * First blow any referenced inode from this file system
	 * out of the reference cache, and delete the timer.
	 */
	xfs_refcache_purge_mp(mp);

	XFS_bflush(mp->m_ddev_targp);
	error = xfs_unmount_flush(mp, 0);
	if (error)
		goto out;

	/* at this point only the root vnode should still be referenced */
	ASSERT(vn_count(rvp) == 1);

	/*
	 * Drop the reference count
	 */
	VN_RELE(rvp);

	/*
	 * If we're forcing a shutdown, typically because of a media error,
	 * we want to make sure we invalidate dirty pages that belong to
	 * referenced vnodes as well.
	 */
	if (XFS_FORCED_SHUTDOWN(mp)) {
		error = xfs_sync(&mp->m_bhv, (SYNC_WAIT | SYNC_CLOSE), credp);
		/* a forced shutdown sync must not report corruption */
		ASSERT(error != EFSCORRUPTED);
	}
	xfs_unmountfs_needed = 1;

out:
	/* Send DMAPI event, if required.
	 * Then do xfs_unmountfs() if needed.
	 * Then return error (or zero).
	 */
	if (unmount_event_wanted) {
		/* Note: mp structure must still exist for
		 * XFS_SEND_UNMOUNT() call.
		 */
		XFS_SEND_UNMOUNT(mp, vfsp, error == 0 ? rvp : NULL,
			DM_RIGHT_NULL, 0, error, unmount_event_flags);
	}
	if (xfs_unmountfs_needed) {
		/*
		 * Call common unmount function to flush to disk
		 * and free the super block buffer & mount structures.
		 */
		xfs_unmountfs(mp, credp);
	}
	return XFS_ERROR(error);
}