/* * ufs_link: create hard link. */ int ufs_link(void *v) { struct vop_link_args /* { struct vnode *a_dvp; struct vnode *a_vp; struct componentname *a_cnp; } */ *ap = v; struct vnode *dvp = ap->a_dvp; struct vnode *vp = ap->a_vp; struct componentname *cnp = ap->a_cnp; struct inode *ip; struct direct *newdir; int error; struct ufs_lookup_results *ulr; KASSERT(dvp != vp); KASSERT(vp->v_type != VDIR); KASSERT(dvp->v_mount == vp->v_mount); /* XXX should handle this material another way */ ulr = &VTOI(dvp)->i_crap; UFS_CHECK_CRAPCOUNTER(VTOI(dvp)); fstrans_start(dvp->v_mount, FSTRANS_SHARED); error = vn_lock(vp, LK_EXCLUSIVE); if (error) { VOP_ABORTOP(dvp, cnp); goto out2; } ip = VTOI(vp); if ((nlink_t)ip->i_nlink >= LINK_MAX) { VOP_ABORTOP(dvp, cnp); error = EMLINK; goto out1; } if (ip->i_flags & (IMMUTABLE | APPEND)) { VOP_ABORTOP(dvp, cnp); error = EPERM; goto out1; } error = UFS_WAPBL_BEGIN(vp->v_mount); if (error) { VOP_ABORTOP(dvp, cnp); goto out1; } ip->i_nlink++; DIP_ASSIGN(ip, nlink, ip->i_nlink); ip->i_flag |= IN_CHANGE; error = UFS_UPDATE(vp, NULL, NULL, UPDATE_DIROP); if (!error) { newdir = pool_cache_get(ufs_direct_cache, PR_WAITOK); ufs_makedirentry(ip, cnp, newdir); error = ufs_direnter(dvp, ulr, vp, newdir, cnp, NULL); pool_cache_put(ufs_direct_cache, newdir); } if (error) { ip->i_nlink--; DIP_ASSIGN(ip, nlink, ip->i_nlink); ip->i_flag |= IN_CHANGE; UFS_WAPBL_UPDATE(vp, NULL, NULL, UPDATE_DIROP); } UFS_WAPBL_END(vp->v_mount); out1: VOP_UNLOCK(vp); out2: VN_KNOTE(vp, NOTE_LINK); VN_KNOTE(dvp, NOTE_WRITE); vput(dvp); fstrans_done(dvp->v_mount); return (error); }
/*
 * ufs_rmdir: remove a directory.
 *
 * Expects dvp and vp referenced and locked; both are released on
 * return on every path.
 */
int
ufs_rmdir(void *v)
{
	struct vop_rmdir_args /* {
		struct vnode *a_dvp;
		struct vnode *a_vp;
		struct componentname *a_cnp;
	} */ *ap = v;
	struct vnode *vp, *dvp;
	struct componentname *cnp;
	struct inode *ip, *dp;
	int error;
	struct ufs_lookup_results *ulr;

	vp = ap->a_vp;
	dvp = ap->a_dvp;
	cnp = ap->a_cnp;
	ip = VTOI(vp);
	dp = VTOI(dvp);

	/* XXX should handle this material another way */
	ulr = &dp->i_crap;
	UFS_CHECK_CRAPCOUNTER(dp);

	/*
	 * No rmdir "." or of mounted directories please.
	 * When dp == ip, dvp and vp are the same vnode, so only one
	 * vput() may unlock it; the other reference is dropped with
	 * vrele().
	 */
	if (dp == ip || vp->v_mountedhere != NULL) {
		if (dp == ip)
			vrele(dvp);
		else
			vput(dvp);
		vput(vp);
		return (EINVAL);
	}

	fstrans_start(dvp->v_mount, FSTRANS_SHARED);

	/*
	 * Do not remove a directory that is in the process of being renamed.
	 * Verify that the directory is empty (and valid). (Rmdir ".." won't
	 * be valid since ".." will contain a reference to the current
	 * directory and thus be non-empty.)
	 */
	error = 0;
	/* nlink == 2 means only "." and the parent's entry remain. */
	if (ip->i_nlink != 2 ||
	    !ufs_dirempty(ip, dp->i_number, cnp->cn_cred)) {
		error = ENOTEMPTY;
		goto out;
	}
	if ((dp->i_flags & APPEND) ||
	    (ip->i_flags & (IMMUTABLE | APPEND))) {
		error = EPERM;
		goto out;
	}
	error = UFS_WAPBL_BEGIN(dvp->v_mount);
	if (error)
		goto out;
	/*
	 * Delete reference to directory before purging
	 * inode. If we crash in between, the directory
	 * will be reattached to lost+found,
	 */
	error = ufs_dirremove(dvp, ulr, ip, cnp->cn_flags, 1);
	if (error) {
		UFS_WAPBL_END(dvp->v_mount);
		goto out;
	}
	VN_KNOTE(dvp, NOTE_WRITE | NOTE_LINK);
	cache_purge(dvp);
	/*
	 * Truncate inode. The only stuff left in the directory is "." and
	 * "..". The "." reference is inconsequential since we're quashing
	 * it.
	 */
	dp->i_nlink--;
	DIP_ASSIGN(dp, nlink, dp->i_nlink);
	dp->i_flag |= IN_CHANGE;
	UFS_WAPBL_UPDATE(dvp, NULL, NULL, UPDATE_DIROP);
	ip->i_nlink--;
	DIP_ASSIGN(ip, nlink, ip->i_nlink);
	ip->i_flag |= IN_CHANGE;
	error = UFS_TRUNCATE(vp, (off_t)0, IO_SYNC, cnp->cn_cred);
	cache_purge(vp);
	/*
	 * Unlock the log while we still have reference to unlinked
	 * directory vp so that it will not get locked for recycling
	 */
	UFS_WAPBL_END(dvp->v_mount);
#ifdef UFS_DIRHASH
	/* Discard any directory hash state for the now-empty directory. */
	if (ip->i_dirhash != NULL)
		ufsdirhash_free(ip);
#endif
 out:
	/*
	 * NOTE(review): the NOTE_DELETE knote fires even on the error
	 * paths that jump here; presumably intentional upstream
	 * behavior — confirm before changing.
	 */
	VN_KNOTE(vp, NOTE_DELETE);
	vput(vp);
	/* fstrans_done() before vput(dvp): dvp must still be referenced. */
	fstrans_done(dvp->v_mount);
	vput(dvp);
	return (error);
}
/*
 * Set attribute vnode op. called from several syscalls
 *
 * Each settable field group (flags, owner, size, times, mode) is
 * handled in its own WAPBL transaction; a failure in one group
 * returns early and leaves earlier groups applied.
 */
int
ufs_setattr(void *v)
{
	struct vop_setattr_args /* {
		struct vnode *a_vp;
		struct vattr *a_vap;
		kauth_cred_t a_cred;
	} */ *ap = v;
	struct vattr *vap;
	struct vnode *vp;
	struct inode *ip;
	kauth_cred_t cred;
	struct lwp *l;
	int error;
	kauth_action_t action;
	bool changing_sysflags;

	vap = ap->a_vap;
	vp = ap->a_vp;
	ip = VTOI(vp);
	cred = ap->a_cred;
	l = curlwp;
	action = KAUTH_VNODE_WRITE_FLAGS;
	changing_sysflags = false;

	/*
	 * Check for unsettable attributes.
	 */
	if ((vap->va_type != VNON) || (vap->va_nlink != VNOVAL) ||
	    (vap->va_fsid != VNOVAL) || (vap->va_fileid != VNOVAL) ||
	    (vap->va_blocksize != VNOVAL) || (vap->va_rdev != VNOVAL) ||
	    ((int)vap->va_bytes != VNOVAL) || (vap->va_gen != VNOVAL)) {
		return (EINVAL);
	}

	fstrans_start(vp->v_mount, FSTRANS_SHARED);

	if (vap->va_flags != VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY) {
			error = EROFS;
			goto out;
		}

		/* Snapshot flag cannot be set or cleared */
		if ((vap->va_flags & (SF_SNAPSHOT | SF_SNAPINVAL)) !=
		    (ip->i_flags & (SF_SNAPSHOT | SF_SNAPINVAL))) {
			error = EPERM;
			goto out;
		}

		if (ip->i_flags & (SF_IMMUTABLE | SF_APPEND)) {
			action |= KAUTH_VNODE_HAS_SYSFLAGS;
		}

		/* Touching any superuser flag needs extra authorization. */
		if ((vap->va_flags & SF_SETTABLE) !=
		    (ip->i_flags & SF_SETTABLE)) {
			action |= KAUTH_VNODE_WRITE_SYSFLAGS;
			changing_sysflags = true;
		}

		error = kauth_authorize_vnode(cred, action, vp, NULL,
		    genfs_can_chflags(cred, vp->v_type, ip->i_uid,
		    changing_sysflags));
		if (error)
			goto out;

		if (changing_sysflags) {
			/* Authorized for sysflags: take the new set as-is. */
			error = UFS_WAPBL_BEGIN(vp->v_mount);
			if (error)
				goto out;
			ip->i_flags = vap->va_flags;
			DIP_ASSIGN(ip, flags, ip->i_flags);
		} else {
			/*
			 * Not changing sysflags: keep the existing superuser
			 * flags and replace only the user-settable ones.
			 */
			error = UFS_WAPBL_BEGIN(vp->v_mount);
			if (error)
				goto out;
			ip->i_flags &= SF_SETTABLE;
			ip->i_flags |= (vap->va_flags & UF_SETTABLE);
			DIP_ASSIGN(ip, flags, ip->i_flags);
		}
		ip->i_flag |= IN_CHANGE;
		UFS_WAPBL_UPDATE(vp, NULL, NULL, 0);
		UFS_WAPBL_END(vp->v_mount);
		/*
		 * If we just made the file immutable/append-only, stop
		 * here: the remaining attribute changes would now be
		 * forbidden anyway.
		 */
		if (vap->va_flags & (IMMUTABLE | APPEND)) {
			error = 0;
			goto out;
		}
	}
	if (ip->i_flags & (IMMUTABLE | APPEND)) {
		error = EPERM;
		goto out;
	}
	/*
	 * Go through the fields and update iff not VNOVAL.
	 */
	if (vap->va_uid != (uid_t)VNOVAL || vap->va_gid != (gid_t)VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY) {
			error = EROFS;
			goto out;
		}
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		error = ufs_chown(vp, vap->va_uid, vap->va_gid, cred, l);
		UFS_WAPBL_END(vp->v_mount);
		if (error)
			goto out;
	}
	if (vap->va_size != VNOVAL) {
		/*
		 * Disallow write attempts on read-only file systems;
		 * unless the file is a socket, fifo, or a block or
		 * character device resident on the file system.
		 */
		switch (vp->v_type) {
		case VDIR:
			error = EISDIR;
			goto out;
		case VCHR:
		case VBLK:
		case VFIFO:
			break;
		case VREG:
			if (vp->v_mount->mnt_flag & MNT_RDONLY) {
				error = EROFS;
				goto out;
			}
			if ((ip->i_flags & SF_SNAPSHOT) != 0) {
				error = EPERM;
				goto out;
			}
			error = ufs_truncate(vp, vap->va_size, cred);
			if (error)
				goto out;
			break;
		default:
			error = EOPNOTSUPP;
			goto out;
		}
	}
	ip = VTOI(vp);
	if (vap->va_atime.tv_sec != VNOVAL || vap->va_mtime.tv_sec != VNOVAL ||
	    vap->va_birthtime.tv_sec != VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY) {
			error = EROFS;
			goto out;
		}
		if ((ip->i_flags & SF_SNAPSHOT) != 0) {
			error = EPERM;
			goto out;
		}
		error = kauth_authorize_vnode(cred, KAUTH_VNODE_WRITE_TIMES,
		    vp, NULL, genfs_can_chtimes(vp, vap->va_vaflags,
		    ip->i_uid, cred));
		if (error)
			goto out;
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		if (vap->va_atime.tv_sec != VNOVAL)
			if (!(vp->v_mount->mnt_flag & MNT_NOATIME))
				ip->i_flag |= IN_ACCESS;
		if (vap->va_mtime.tv_sec != VNOVAL) {
			ip->i_flag |= IN_CHANGE | IN_UPDATE;
			if (vp->v_mount->mnt_flag & MNT_RELATIME)
				ip->i_flag |= IN_ACCESS;
		}
		/* Birth time exists only in the UFS2 on-disk inode. */
		if (vap->va_birthtime.tv_sec != VNOVAL &&
		    ip->i_ump->um_fstype == UFS2) {
			ip->i_ffs2_birthtime = vap->va_birthtime.tv_sec;
			ip->i_ffs2_birthnsec = vap->va_birthtime.tv_nsec;
		}
		error = UFS_UPDATE(vp, &vap->va_atime, &vap->va_mtime, 0);
		UFS_WAPBL_END(vp->v_mount);
		if (error)
			goto out;
	}
	error = 0;
	if (vap->va_mode != (mode_t)VNOVAL) {
		if (vp->v_mount->mnt_flag & MNT_RDONLY) {
			error = EROFS;
			goto out;
		}
		/* Snapshots may not be made writable or executable. */
		if ((ip->i_flags & SF_SNAPSHOT) != 0 &&
		    (vap->va_mode & (S_IXUSR | S_IWUSR | S_IXGRP | S_IWGRP |
		    S_IXOTH | S_IWOTH))) {
			error = EPERM;
			goto out;
		}
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		error = ufs_chmod(vp, (int)vap->va_mode, cred, l);
		UFS_WAPBL_END(vp->v_mount);
	}
	VN_KNOTE(vp, NOTE_ATTRIB);
 out:
	fstrans_done(vp->v_mount);
	return (error);
}
/*
 * Last reference to an inode. If necessary, write or delete it.
 *
 * When the link count has dropped to zero on a writable mount, the
 * inode's blocks are freed (incrementally under WAPBL, one indirect
 * block's worth at a time) and the inode is released.
 */
int
ufs_inactive(void *v)
{
	struct vop_inactive_args /* {
		struct vnode *a_vp;
		struct bool *a_recycle;
	} */ *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct mount *transmp;
	mode_t mode;
	int error = 0;
	int logged = 0;		/* nonzero while a WAPBL transaction is open */

	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);

	/* Saved so fstrans_done() does not need vp after VOP_UNLOCK(). */
	transmp = vp->v_mount;
	fstrans_start(transmp, FSTRANS_SHARED);
	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_mode == 0)
		goto out;
	if (ip->i_ffs_effnlink == 0 && DOINGSOFTDEP(vp))
		softdep_releasefile(ip);

	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		logged = 1;
#ifdef QUOTA
		(void)chkiq(ip, -1, NOCRED, 0);
#endif
#ifdef UFS_EXTATTR
		ufs_extattr_vnode_inactive(vp, curlwp);
#endif
		if (ip->i_size != 0) {
			/*
			 * When journaling, only truncate one indirect block
			 * at a time
			 */
			if (vp->v_mount->mnt_wapbl) {
				uint64_t incr = MNINDIR(ip->i_ump) <<
				    vp->v_mount->mnt_fs_bshift; /* Power of 2 */
				uint64_t base = NDADDR <<
				    vp->v_mount->mnt_fs_bshift;
				while (!error && ip->i_size > base + incr) {
					/*
					 * round down to next full indirect
					 * block boundary.
					 */
					uint64_t nsize = base +
					    ((ip->i_size - base - 1) &
					    ~(incr - 1));
					error = UFS_TRUNCATE(vp, nsize, 0,
					    NOCRED);
					if (error)
						break;
					/*
					 * Close and reopen the log between
					 * steps to bound transaction size.
					 */
					UFS_WAPBL_END(vp->v_mount);
					error = UFS_WAPBL_BEGIN(vp->v_mount);
					if (error)
						goto out;
				}
			}
			if (!error)
				error = UFS_TRUNCATE(vp, (off_t)0, 0, NOCRED);
		}
		/*
		 * Setting the mode to zero needs to wait for the inode
		 * to be written just as does a change to the link count.
		 * So, rather than creating a new entry point to do the
		 * same thing, we just use softdep_change_linkcnt().
		 */
		DIP_ASSIGN(ip, rdev, 0);
		mode = ip->i_mode;
		ip->i_mode = 0;
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		mutex_enter(&vp->v_interlock);
		vp->v_iflag |= VI_FREEING;
		mutex_exit(&vp->v_interlock);
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip);
		UFS_VFREE(vp, ip->i_number, mode);
	}

	if (ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) {
		if (!logged++) {
			int err;
			err = UFS_WAPBL_BEGIN(vp->v_mount);
			if (err)
				goto out;
		}
		UFS_UPDATE(vp, NULL, NULL, 0);
	}
	if (logged)
		UFS_WAPBL_END(vp->v_mount);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	*ap->a_recycle = (ip->i_mode == 0);
	VOP_UNLOCK(vp, 0);
	fstrans_done(transmp);
	return (error);
}
/*
 * ffs_wapbl_start: bring up (or tear down the flags for) WAPBL
 * journaling on a mount, and finish any pending log replay.
 */
int
ffs_wapbl_start(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct fs *fs = ump->um_fs;
	struct vnode *devvp = ump->um_devvp;
	daddr_t off;
	size_t count;
	size_t blksize;
	uint64_t extradata;
	int error;

	if (mp->mnt_wapbl == NULL) {
		if (fs->fs_journal_flags & UFS_WAPBL_FLAGS_CLEAR_LOG) {
			/* Clear out any existing journal file */
			error = wapbl_remove_log(mp);
			if (error != 0)
				return error;
		}

		if (mp->mnt_flag & MNT_LOG) {
			KDASSERT(fs->fs_ronly == 0);

			/* WAPBL needs UFS2 format super block */
			if (ffs_superblock_layout(fs) < 2) {
				printf("%s fs superblock in old format, "
				   "not journaling\n",
				   VFSTOUFS(mp)->um_fs->fs_fsmnt);
				mp->mnt_flag &= ~MNT_LOG;
				return EINVAL;
			}

			error = wapbl_log_position(mp, fs, devvp, &off,
			    &count, &blksize, &extradata);
			if (error)
				return error;

			error = wapbl_start(&mp->mnt_wapbl, mp, devvp, off,
			    count, blksize, mp->mnt_wapbl_replay,
			    ffs_wapbl_sync_metadata,
			    ffs_wapbl_abort_sync_metadata);
			if (error)
				return error;

			mp->mnt_wapbl_op = &wapbl_ops;

#ifdef WAPBL_DEBUG
			printf("%s: enabling logging\n", fs->fs_fsmnt);
#endif

			/*
			 * First time journaling this fs: record FS_DOWAPBL
			 * in the superblock and flush the log so the flag
			 * is durable before any journaled operation runs.
			 */
			if ((fs->fs_flags & FS_DOWAPBL) == 0) {
				/*
				 * NOTE(review): UFS_WAPBL_BEGIN() return
				 * value is ignored here, unlike every other
				 * caller in this file — confirm upstream
				 * whether this is intentional.
				 */
				UFS_WAPBL_BEGIN(mp);
				fs->fs_flags |= FS_DOWAPBL;
				error = ffs_sbupdate(ump, MNT_WAIT);
				if (error) {
					UFS_WAPBL_END(mp);
					ffs_wapbl_stop(mp, MNT_FORCE);
					return error;
				}
				UFS_WAPBL_END(mp);
				error = wapbl_flush(mp->mnt_wapbl, 1);
				if (error) {
					ffs_wapbl_stop(mp, MNT_FORCE);
					return error;
				}
			}
		} else if (fs->fs_flags & FS_DOWAPBL) {
			/* Logging disabled: mark superblock dirty to clear flag. */
			fs->fs_fmod = 1;
			fs->fs_flags &= ~FS_DOWAPBL;
		}
	}

	/*
	 * It is recommended that you finish replay with logging enabled.
	 * However, even if logging is not enabled, the remaining log
	 * replay should be safely recoverable with an fsck, so perform
	 * it anyway.
	 */
	if ((fs->fs_ronly == 0) && mp->mnt_wapbl_replay) {
		int saveflag = mp->mnt_flag & MNT_RDONLY;
		/*
		 * Make sure MNT_RDONLY is not set so that the inode
		 * cleanup in ufs_inactive will actually do its work.
		 */
		mp->mnt_flag &= ~MNT_RDONLY;
		ffs_wapbl_replay_finish(mp);
		mp->mnt_flag |= saveflag;
		KASSERT(fs->fs_ronly == 0);
	}

	return 0;
}
/*
 * Last reference to an inode. If necessary, write or delete it.
 *
 * NOTE(review): this is a second ufs_inactive (DIP()/vrecycle/curproc
 * style) — presumably from a different source tree than the one above;
 * the two cannot live in the same compilation unit.  Confirm which
 * variant this file is meant to carry.
 */
int
ufs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct fs *fs = ip->i_fs;
	struct proc *p = curproc;
	mode_t mode;
	int error = 0, logged = 0, truncate_error = 0;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("ufs_inactive: pushing active", vp);
#endif

	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_din1 == NULL || DIP(ip, mode) == 0)
		goto out;

	if (DIP(ip, nlink) <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		logged = 1;
		if (getinoquota(ip) == 0)
			(void)ufs_quota_free_inode(ip, NOCRED);

		if (DIP(ip, size) != 0 && vp->v_mount->mnt_wapbl) {
			/*
			 * When journaling, only truncate one indirect block at
			 * a time.
			 */
			uint64_t incr = MNINDIR(ip->i_ump) << fs->fs_bshift;
			uint64_t base = NDADDR << fs->fs_bshift;
			while (!error && DIP(ip, size) > base + incr) {
				/*
				 * round down to next full indirect block
				 * boundary.
				 */
				uint64_t nsize = base +
				    ((DIP(ip, size) - base - 1) & ~(incr - 1));
				error = UFS_TRUNCATE(ip, nsize, 0, NOCRED);
				if (error)
					break;
				/* Re-cycle the log between truncation steps. */
				UFS_WAPBL_END(vp->v_mount);
				error = UFS_WAPBL_BEGIN(vp->v_mount);
				if (error)
					goto out;
			}
		}

		if (error == 0) {
			truncate_error = UFS_TRUNCATE(ip, (off_t)0, 0, NOCRED);
			/* XXX pedro: remove me */
			if (truncate_error)
				printf("UFS_TRUNCATE()=%d\n", truncate_error);
		}

		DIP_ASSIGN(ip, rdev, 0);
		mode = DIP(ip, mode);
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;

		/*
		 * Setting the mode to zero needs to wait for the inode to be
		 * written just as does a change to the link count. So, rather
		 * than creating a new entry point to do the same thing, we
		 * just use softdep_change_linkcnt(). Also, we can't let
		 * softdep co-opt us to help on its worklist, as we may end up
		 * trying to recycle vnodes and getting to this same point a
		 * couple of times, blowing the kernel stack. However, this
		 * could be optimized by checking if we are coming from
		 * vrele(), vput() or vclean() (by checking for VXLOCK) and
		 * just avoiding the co-opt to happen in the last case.
		 */
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip, 1);

		UFS_INODE_FREE(ip, ip->i_number, mode);
	}

	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		if (!logged++) {
			int err;
			err = UFS_WAPBL_BEGIN(vp->v_mount);
			if (err) {
				error = err;
				goto out;
			}
		}
		UFS_UPDATE(ip, 0);
	}
	if (logged)
		UFS_WAPBL_END(vp->v_mount);
out:
	VOP_UNLOCK(vp, 0);

	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (error == 0 && truncate_error == 0 &&
	    (ip->i_din1 == NULL || DIP(ip, mode) == 0))
		vrecycle(vp, p);

	/* Truncation failure takes precedence over any later error. */
	return (truncate_error ? truncate_error : error);
}
/*
 * ffs_full_fsync: flush all dirty data and metadata for a vnode.
 * Helper for ffs_fsync() when the whole file is being synced.
 */
/* ARGSUSED */
int
ffs_full_fsync(struct vnode *vp, int flags)
{
	int error, i, uflags;
	struct mount *mp;

	KASSERT(vp->v_tag == VT_UFS);
	KASSERT(VTOI(vp) != NULL);
	KASSERT(vp->v_type != VCHR && vp->v_type != VBLK);

	error = 0;
	uflags = UPDATE_CLOSE | ((flags & FSYNC_WAIT) ? UPDATE_WAIT : 0);
	mp = vp->v_mount;

	/*
	 * Flush all dirty data associated with the vnode.
	 */
	if (vp->v_type == VREG) {
		int pflags = PGO_ALLPAGES | PGO_CLEANIT;

		if ((flags & FSYNC_WAIT))
			pflags |= PGO_SYNCIO;
		if (fstrans_getstate(mp) == FSTRANS_SUSPENDING)
			pflags |= PGO_FREE;
		/* VOP_PUTPAGES consumes (releases) the interlock. */
		mutex_enter(vp->v_interlock);
		error = VOP_PUTPAGES(vp, 0, 0, pflags);
		if (error)
			return error;
	}

#ifdef WAPBL
	if (mp && mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request. We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0)
			return 0;

		/* Push the inode through the journal only if it is dirty. */
		if ((VTOI(vp)->i_flag & (IN_ACCESS | IN_CHANGE | IN_UPDATE |
		    IN_MODIFY | IN_MODIFIED | IN_ACCESSED)) != 0) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error)
				return error;
			error = ffs_update(vp, NULL, NULL, uflags);
			UFS_WAPBL_END(mp);
		}
		if (error || (flags & FSYNC_NOLOG) != 0)
			return error;

		/*
		 * Don't flush the log if the vnode being flushed
		 * contains no dirty buffers that could be in the log.
		 */
		if (!LIST_EMPTY(&vp->v_dirtyblkhd)) {
			error = wapbl_flush(mp->mnt_wapbl, 0);
			if (error)
				return error;
		}

		/* Wait for all in-flight writes on this vnode to drain. */
		if ((flags & FSYNC_WAIT) != 0) {
			mutex_enter(vp->v_interlock);
			while (vp->v_numoutput != 0)
				cv_wait(&vp->v_cv, vp->v_interlock);
			mutex_exit(vp->v_interlock);
		}

		return error;
	}
#endif /* WAPBL */

	/* Non-journaled path: flush buffers, then update the inode. */
	error = vflushbuf(vp, (flags & FSYNC_WAIT) != 0);
	if (error == 0)
		error = ffs_update(vp, NULL, NULL, uflags);
	if (error == 0 && (flags & FSYNC_CACHE) != 0) {
		/* Ask the underlying device to flush its write cache. */
		i = 1;
		(void)VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &i, FWRITE,
		    kauth_cred_get());
	}

	return error;
}
/*
 * ffs_fsync: vnode fsync operation.  A (0,0) range or a non-regular
 * file syncs everything via ffs_full_fsync(); otherwise only the
 * given byte range (plus covering indirect blocks) is flushed.
 */
int
ffs_fsync(void *v)
{
	struct vop_fsync_args /* {
		struct vnode *a_vp;
		kauth_cred_t a_cred;
		int a_flags;
		off_t a_offlo;
		off_t a_offhi;
		struct lwp *a_l;
	} */ *ap = v;
	struct buf *bp;
	int num, error, i;
	struct indir ia[NIADDR + 1];
	int bsize;
	daddr_t blk_high;
	struct vnode *vp;
	struct mount *mp;

	vp = ap->a_vp;
	mp = vp->v_mount;

	fstrans_start(mp, FSTRANS_LAZY);
	if ((ap->a_offlo == 0 && ap->a_offhi == 0) || (vp->v_type != VREG)) {
		error = ffs_full_fsync(vp, ap->a_flags);
		goto out;
	}

	/* Highest file block touched by the range, rounded up. */
	bsize = mp->mnt_stat.f_iosize;
	blk_high = ap->a_offhi / bsize;
	if (ap->a_offhi % bsize != 0)
		blk_high++;

	/*
	 * First, flush all pages in range.
	 */
	/* VOP_PUTPAGES consumes (releases) the interlock. */
	mutex_enter(vp->v_interlock);
	error = VOP_PUTPAGES(vp, trunc_page(ap->a_offlo),
	    round_page(ap->a_offhi), PGO_CLEANIT |
	    ((ap->a_flags & FSYNC_WAIT) ? PGO_SYNCIO : 0));
	if (error) {
		goto out;
	}

#ifdef WAPBL
	KASSERT(vp->v_type == VREG);
	if (mp->mnt_wapbl) {
		/*
		 * Don't bother writing out metadata if the syncer is
		 * making the request. We will let the sync vnode
		 * write it out in a single burst through a call to
		 * VFS_SYNC().
		 */
		if ((ap->a_flags & (FSYNC_DATAONLY | FSYNC_LAZY)) != 0) {
			fstrans_done(mp);
			return 0;
		}
		error = 0;
		if (vp->v_tag == VT_UFS && VTOI(vp)->i_flag &
		    (IN_ACCESS | IN_CHANGE | IN_UPDATE | IN_MODIFY |
		    IN_MODIFIED | IN_ACCESSED)) {
			error = UFS_WAPBL_BEGIN(mp);
			if (error) {
				fstrans_done(mp);
				return error;
			}
			error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
			    ((ap->a_flags & FSYNC_WAIT) ? UPDATE_WAIT : 0));
			UFS_WAPBL_END(mp);
		}
		if (error || (ap->a_flags & FSYNC_NOLOG) != 0) {
			fstrans_done(mp);
			return error;
		}
		error = wapbl_flush(mp->mnt_wapbl, 0);
		fstrans_done(mp);
		return error;
	}
#endif /* WAPBL */

	/*
	 * Then, flush indirect blocks.
	 */
	if (blk_high >= NDADDR) {
		error = ufs_getlbns(vp, blk_high, ia, &num);
		if (error)
			goto out;

		/*
		 * Walk the indirect-block chain covering blk_high and
		 * start async writes on any delayed-write buffers found
		 * in core.  The lock is dropped around bawrite().
		 */
		mutex_enter(&bufcache_lock);
		for (i = 0; i < num; i++) {
			if ((bp = incore(vp, ia[i].in_lbn)) == NULL)
				continue;
			if ((bp->b_cflags & BC_BUSY) != 0 ||
			    (bp->b_oflags & BO_DELWRI) == 0)
				continue;
			bp->b_cflags |= BC_BUSY | BC_VFLUSH;
			mutex_exit(&bufcache_lock);
			bawrite(bp);
			mutex_enter(&bufcache_lock);
		}
		mutex_exit(&bufcache_lock);
	}

	/* Wait for all in-flight writes on this vnode to drain. */
	if (ap->a_flags & FSYNC_WAIT) {
		mutex_enter(vp->v_interlock);
		while (vp->v_numoutput > 0)
			cv_wait(&vp->v_cv, vp->v_interlock);
		mutex_exit(vp->v_interlock);
	}
	error = ffs_update(vp, NULL, NULL, UPDATE_CLOSE |
	    (((ap->a_flags & (FSYNC_WAIT | FSYNC_DATAONLY)) == FSYNC_WAIT)
	    ? UPDATE_WAIT : 0));

	if (error == 0 && ap->a_flags & FSYNC_CACHE) {
		/* Ask the underlying device to flush its write cache. */
		int l = 0;
		VOP_IOCTL(VTOI(vp)->i_devvp, DIOCCACHESYNC, &l, FWRITE,
		    curlwp->l_cred);
	}

 out:
	fstrans_done(mp);
	return error;
}