/*
 * Shut down the WAPBL journal on mount "mp".
 *
 * mp    - mount whose log is being stopped
 * force - nonzero (forced unmount): press on past flush/transaction
 *         errors instead of failing the stop.
 *
 * Returns 0 on success or an errno; on a non-forced failure of
 * wapbl_stop() the FS_DOWAPBL superblock flag is restored so the
 * volume is still marked as journaled.
 */
int
ffs_wapbl_stop(struct mount *mp, int force)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	int error;

	if (mp->mnt_wapbl) {
		/* An active log implies a writable mount. */
		KDASSERT(fs->fs_ronly == 0);
		/*
		 * Make sure turning off FS_DOWAPBL is only removed
		 * as the only change in the final flush since otherwise
		 * a transaction may reorder writes.
		 */
		error = wapbl_flush(mp->mnt_wapbl, 1);
		if (error && !force)
			return error;
		if (error && force)
			goto forceout;
		error = UFS_WAPBL_BEGIN(mp);
		if (error && !force)
			return error;
		if (error && force)
			goto forceout;
		KASSERT(fs->fs_flags & FS_DOWAPBL);
		/*
		 * Clear the journaling flag and push the superblock out
		 * synchronously while still inside the transaction.
		 */
		fs->fs_flags &= ~FS_DOWAPBL;
		error = ffs_sbupdate(ump, MNT_WAIT);
		KASSERT(error == 0);	/* XXX a bit drastic! */
		UFS_WAPBL_END(mp);
	forceout:
		error = wapbl_stop(mp->mnt_wapbl, force);
		if (error) {
			/* A forced stop is asserted never to fail. */
			KASSERT(!force);
			fs->fs_flags |= FS_DOWAPBL;
			return error;
		}
		fs->fs_flags &= ~FS_DOWAPBL; /* Repeat in case of forced error */
		mp->mnt_wapbl = NULL;
#ifdef WAPBL_DEBUG
		printf("%s: disabled logging\n", fs->fs_fsmnt);
#endif
	}
	return 0;
}
/* ARGSUSED */
/*
 * Fully synchronize a vnode to disk: dirty pages, inode metadata, and
 * (on journaled mounts) the WAPBL log.
 *
 * vp    - vnode to sync; must be a UFS vnode and not a character or
 *         block special vnode (asserted below).
 * flags - FSYNC_* flags: FSYNC_WAIT makes the sync synchronous,
 *         FSYNC_DATAONLY/FSYNC_LAZY skip metadata on journaled mounts,
 *         FSYNC_NOLOG skips the log flush, FSYNC_CACHE also flushes
 *         the underlying device's write cache.
 *
 * Returns 0 on success or an errno value.
 */
int
ffs_full_fsync(struct vnode *vp, int flags)
{
	int error, i, uflags;
	struct mount *mp;

	KASSERT(vp->v_tag == VT_UFS);
	KASSERT(VTOI(vp) != NULL);
	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK);

	error = 0;

	/* Inode-update flags: synchronous only when the caller waits. */
	uflags = UPDATE_CLOSE | ((flags & FSYNC_WAIT) ? UPDATE_WAIT : 0);

	mp = vp->v_mount;

	/*
	 * Flush all dirty data associated with the vnode.
	 */
	if (vp->v_type == VREG) {
		int pflags = PGO_ALLPAGES | PGO_CLEANIT;

		if ((flags & FSYNC_WAIT))
			pflags |= PGO_SYNCIO;
		/* Free pages too while the filesystem is being suspended. */
		if (fstrans_getstate(mp) == FSTRANS_SUSPENDING)
			pflags |= PGO_FREE;
		mutex_enter(vp->v_interlock);
		/*
		 * NOTE(review): no matching mutex_exit() — VOP_PUTPAGES()
		 * appears to consume/release v_interlock; confirm against
		 * the VOP_PUTPAGES contract.
		 */
		error = VOP_PUTPAGES(vp, 0, 0, pflags);
		if (error)
			return error;
	}

#ifdef WAPBL
	if (mp && mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request.  We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0)
			return 0;
		/* Only open a transaction if the inode is actually dirty. */
		if ((VTOI(vp)->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE
		    | IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) != 0) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error)
				return error;
			error = ffs_update(vp, NULL, NULL, uflags);
			UFS_WAPBL_END(mp);
		}
		if (error || (flags & FSYNC_NOLOG) != 0)
			return error;
		/*
		 * Don't flush the log if the vnode being flushed
		 * contains no dirty buffers that could be in the log.
		 */
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			error = wapbl_flush(mp->mnt_wapbl, 0);
			if (error)
				return error;
		}

		if ((flags & FSYNC_WAIT) != 0) {
			/* Wait for any in-flight writes to drain. */
			mutex_enter(vp->v_interlock);
			while (vp->v_numoutput != 0)
				cv_wait(&vp->v_cv, vp->v_interlock);
			mutex_exit(vp->v_interlock);
		}

		return error;
	}
#endif /* WAPBL */

	/* Non-journaled path: flush buffers, then update the inode. */
	error = vflushbuf(vp, (flags & FSYNC_WAIT) != 0);
	if (error == 0)
		error = ffs_update(vp, NULL, NULL, uflags);
	if (error == 0 && (flags & FSYNC_CACHE) != 0) {
		i = 1;
		/* Best-effort device cache flush; errors are ignored. */
		(void)VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &i, FWRITE,
		    kauth_cred_get());
	}

	return error;
}
/*
 * Start WAPBL journaling on mount "mp" if requested (MNT_LOG) and not
 * already running, record FS_DOWAPBL in the superblock, and finish any
 * pending log replay.
 *
 * Returns 0 on success or an errno value.  On failure while committing
 * the superblock flag, the freshly started log is torn down again with
 * a forced ffs_wapbl_stop().
 */
int
ffs_wapbl_start(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct vnode *devvp = ump->um_devvp;
	daddr_t off;
	size_t count;
	size_t blksize;
	uint64_t extradata;
	int error;

	if (mp->mnt_wapbl == NULL) {
		if (fs->fs_journal_flags & UFS_WAPBL_FLAGS_CLEAR_LOG) {
			/* Clear out any existing journal file */
			error = wapbl_remove_log(mp);
			if (error != 0)
				return error;
		}

		if (mp->mnt_flag & MNT_LOG) {
			KDASSERT(fs->fs_ronly == 0);

			/* WAPBL needs UFS2 format super block */
			if (ffs_superblock_layout(fs) < 2) {
				printf("%s fs superblock in old format, "
				   "not journaling\n",
				   VFSTOUFS(mp)->um_fs->fs_fsmnt);
				mp->mnt_flag &= ~MNT_LOG;
				return EINVAL;
			}

			/* Locate (or size) the on-disk log region. */
			error = wapbl_log_position(mp, fs, devvp, &off,
			    &count, &blksize, &extradata);
			if (error)
				return error;

			error = wapbl_start(&mp->mnt_wapbl, mp, devvp, off,
			    count, blksize, mp->mnt_wapbl_replay,
			    ffs_wapbl_sync_metadata,
			    ffs_wapbl_abort_sync_metadata);
			if (error)
				return error;

			mp->mnt_wapbl_op = &wapbl_ops;

#ifdef WAPBL_DEBUG
			printf("%s: enabling logging\n", fs->fs_fsmnt);
#endif

			if ((fs->fs_flags & FS_DOWAPBL) == 0) {
				/*
				 * Commit FS_DOWAPBL to the superblock so a
				 * crash from here on replays the journal.
				 * NOTE(review): the UFS_WAPBL_BEGIN() return
				 * value is not checked here, unlike every
				 * other call site — confirm intentional.
				 */
				UFS_WAPBL_BEGIN(mp);
				fs->fs_flags |= FS_DOWAPBL;
				error = ffs_sbupdate(ump, MNT_WAIT);
				if (error) {
					UFS_WAPBL_END(mp);
					ffs_wapbl_stop(mp, MNT_FORCE);
					return error;
				}
				UFS_WAPBL_END(mp);
				error = wapbl_flush(mp->mnt_wapbl, 1);
				if (error) {
					ffs_wapbl_stop(mp, MNT_FORCE);
					return error;
				}
			}
		} else if (fs->fs_flags & FS_DOWAPBL) {
			/* Logging not requested: clear the stale flag. */
			fs->fs_fmod = 1;
			fs->fs_flags &= ~FS_DOWAPBL;
		}
	}

	/*
	 * It is recommended that you finish replay with logging enabled.
	 * However, even if logging is not enabled, the remaining log
	 * replay should be safely recoverable with an fsck, so perform
	 * it anyway.
	 */
	if ((fs->fs_ronly == 0) && mp->mnt_wapbl_replay) {
		int saveflag = mp->mnt_flag & MNT_RDONLY;
		/*
		 * Make sure MNT_RDONLY is not set so that the inode
		 * cleanup in ufs_inactive will actually do its work.
		 */
		mp->mnt_flag &= ~MNT_RDONLY;
		ffs_wapbl_replay_finish(mp);
		mp->mnt_flag |= saveflag;
		KASSERT(fs->fs_ronly == 0);
	}

	return 0;
}
/*
 * VOP_FSYNC() entry point for FFS.
 *
 * If no byte range is given (a_offlo == a_offhi == 0) or the vnode is
 * not a regular file, fall through to ffs_full_fsync().  Otherwise
 * flush only the pages in [a_offlo, a_offhi], plus whatever indirect
 * blocks cover that range.  The whole operation runs inside a lazy
 * fstrans transaction.
 *
 * Returns 0 on success or an errno value.
 */
int
ffs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	struct buf *bp;
	int num, error, i;
	struct indir ia[NIADDR + 1];
	int bsize;
	daddr_t blk_high;
	struct vnode *vp;
	struct mount *mp;

	vp = ap->a_vp;
	mp = vp->v_mount;

	fstrans_start(mp, FSTRANS_LAZY);
	if ((ap->a_offlo == 0 && ap->a_offhi == 0) || (vp->v_type != VREG)) {
		/* No range given (or not a regular file): sync everything. */
		error = ffs_full_fsync(vp, ap->a_flags);
		goto out;
	}

	/* Highest logical block touched by the range, rounded up. */
	bsize = mp->mnt_stat.f_iosize;
	blk_high = ap->a_offhi / bsize;
	if (ap->a_offhi % bsize != 0)
		blk_high++;

	/*
	 * First, flush all pages in range.
	 */
	mutex_enter(vp->v_interlock);
	/*
	 * NOTE(review): no matching mutex_exit() — VOP_PUTPAGES() appears
	 * to consume/release v_interlock; confirm against its contract.
	 */
	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
	    round_page(ap->a_offhi), PGO_CLEANIT |
	    ((ap->a_flags & FSYNC_WAIT) ? PGO_SYNCIO : 0));
	if (error) {
		goto out;
	}

#ifdef WAPBL
	KASSERT(vp->v_type == VREG);
	if (mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request.  We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((ap->a_flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0) {
			fstrans_done(mp);
			return 0;
		}
		error = 0;
		/* Update the inode inside a transaction when it is dirty. */
		if (vp->v_tag == VT_UFS && VTOI(vp)->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY |
		     IN_MODIFIED | IN_ACCESSED)) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error) {
				fstrans_done(mp);
				return error;
			}
			error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
			    ((ap->a_flags & FSYNC_WAIT) ? UPDATE_WAIT : 0));
			UFS_WAPBL_END(mp);
		}
		if (error || (ap->a_flags & FSYNC_NOLOG) != 0) {
			fstrans_done(mp);
			return error;
		}
		error = wapbl_flush(mp->mnt_wapbl, 0);
		fstrans_done(mp);
		return error;
	}
#endif /* WAPBL */

	/*
	 * Then, flush indirect blocks.
	 */
	if (blk_high >= NDADDR) {
		/* Find the indirect-block chain covering blk_high. */
		error = ufs_getlbns(vp, blk_high, ia, &num);
		if (error)
			goto out;

		mutex_enter(&bufcache_lock);
		for (i = 0; i < num; i++) {
			if ((bp = incore(vp, ia[i].in_lbn)) == NULL)
				continue;
			/* Skip buffers that are busy or already clean. */
			if ((bp->b_cflags & BC_BUSY) != 0 ||
			    (bp->b_oflags & BO_DELWRI) == 0)
				continue;
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			/* bawrite() must not be called under bufcache_lock. */
			mutex_exit(&bufcache_lock);
			bawrite(bp);
			mutex_enter(&bufcache_lock);
		}
		mutex_exit(&bufcache_lock);
	}

	if (ap->a_flags & FSYNC_WAIT) {
		/* Wait for any in-flight writes to drain. */
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0)
			cv_wait(&vp->v_cv, vp->v_interlock);
		mutex_exit(vp->v_interlock);
	}

	error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
	    (((ap->a_flags & (FSYNC_WAIT | FSYNC_DATAONLY)) == FSYNC_WAIT) ?
	     UPDATE_WAIT : 0));

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		int l = 0;
		/* Best-effort device cache flush; result ignored. */
		VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
		    curlwp->l_cred);
	}

out:
	fstrans_done(mp);
	return error;
}