/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Returns 0 on success, or the last error seen while syncing (sync of the
 * remaining vnodes continues after an error).
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 */
int
ext2fs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct m_ext2fs *fs;
	struct vnode_iterator *marker;
	int error, allerror = 0;

	fs = ump->um_e2fs;
	/* A dirty read-only filesystem indicates corruption elsewhere. */
	if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) {	/* XXX */
		printf("fs = %s\n", fs->e2fs_fsmnt);
		panic("update: rofs mod");
	}

	/*
	 * Write back each (modified) inode.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, ext2fs_sync_selector,
	    NULL))) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			vrele(vp);
			continue;
		}
		/*
		 * On a lazy sync, regular files only get their inode
		 * metadata updated; everything else is fully fsynced.
		 */
		if (vp->v_type == VREG && waitfor == MNT_LAZY)
			error = ext2fs_update(vp, NULL, NULL, 0);
		else
			error = VOP_FSYNC(vp, cred,
			    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
		if (error)
			allerror = error;
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);

	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp);
	}

	/*
	 * Write back modified superblock.
	 */
	if (fs->e2fs_fmod != 0) {
		fs->e2fs_fmod = 0;
		fs->e2fs.e2fs_wtime = time_second;
		if ((error = ext2fs_cgupdate(ump, waitfor)))
			allerror = error;
	}
	return (allerror);
}
/*
 * Sync the msdosfs filesystem: write back every modified denode and flush
 * the device vnode.  Continues past individual failures and returns the
 * last error seen (0 on success).
 */
int
msdosfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp;
	struct vnode_iterator *marker;
	struct denode *dep;
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	int error, allerror = 0;

	/*
	 * If we ever switch to not updating all of the FATs all the time,
	 * this would be the place to update them from the first one.
	 */
	if (pmp->pm_fmod != 0) {
		if (pmp->pm_flags & MSDOSFSMNT_RONLY)
			panic("msdosfs_sync: rofs mod");
		else { /* update FATs here */ }
	}
	fstrans_start(mp, FSTRANS_SHARED);
	/*
	 * Write back each (modified) denode.
	 */
	vfs_vnode_iterator_init(mp, &marker);
	while (vfs_vnode_iterator_next(marker, &vp)) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			/* Could not lock it; drop our reference and move on. */
			vrele(vp);
			continue;
		}
		dep = VTODE(vp);
		/*
		 * Skip on lazy syncs, dead vnodes, and denodes with no
		 * pending flag changes, no dirty buffers and clean pages.
		 */
		if (waitfor == MNT_LAZY || vp->v_type == VNON ||
		    dep == NULL ||
		    (((dep->de_flag &
		    (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0) &&
		    (LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    UVM_OBJ_IS_CLEAN(&vp->v_uobj)))) {
			vput(vp);
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
			allerror = error;	/* remember, keep going */
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);

	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((error = VOP_FSYNC(pmp->pm_devvp, cred,
	    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
		allerror = error;
	fstrans_done(mp);
	return (allerror);
}
/*
 * Linux VFS fsync file operation: forwards to the MVFS VOP_FSYNC vnode
 * operation, bridging the calling conventions of different kernel versions.
 * Returns a Linux (negative) errno-style code via mdki_errno_unix_to_linux().
 *
 * NOTE(review): the #if that matches the #endif just after the parameter
 * list (selecting between pre- and post-2.6.35 prototypes) lies above this
 * chunk and is not visible here — keep the two in sync.
 */
extern int
vnode_fop_fsync(
    FILE_T *file_p,
    loff_t start,
    loff_t end,
    int datasync
)
#endif
{
    INODE_T *ip;
    int err;
    CALL_DATA_T cd;
#if LINUX_VERSION_CODE >= KERNEL_VERSION(2,6,35)
    fsync_ctx ctx;
#endif

#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
    if (file_p == NULL) {
        /* NFSD sometimes calls with null file_p and dentry_p filled in. */
        ASSERT(dentry_p != NULL);
        ip = dentry_p->d_inode;
    } else
#endif
        ip = file_p->f_dentry->d_inode;

    ASSERT_I_SEM_MINE(ip);
    ASSERT(MDKI_INOISOURS(ip));
    if (!MDKI_INOISMVFS(ip)) {
        /* Not an MVFS inode: log the oddity but report success. */
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
        MDKI_VFS_LOG(VFS_LOG_ERR, "%s shouldn't be called? (files swapped "
                     "at open): file_p=%p dp=%p\n", __func__,
                     file_p, dentry_p);
#else
        MDKI_VFS_LOG(VFS_LOG_ERR, "%s shouldn't be called? (files swapped "
                     "at open): file_p=%p dp=%p\n", __func__,
                     file_p, file_p->f_dentry);
#endif
        return 0;                   /* don't fail the operation, though */
    }

    mdki_linux_init_call_data(&cd);
#if LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35)
    err = VOP_FSYNC(ITOV(ip),
                    datasync == 0 ? FLAG_NODATASYNC : FLAG_DATASYNC,
                    &cd, (file_ctx *)file_p);
#else
    /* 2.6.35+: pass the file and (non-MRG) byte range in a context struct. */
    ctx.file_p = file_p;
#if !defined (MRG)
    ctx.start = start;
    ctx.end = end;
#endif /* !defined (MRG) */
    err = VOP_FSYNC(ITOV(ip),
                    datasync == 0 ? FLAG_NODATASYNC : FLAG_DATASYNC,
                    &cd, &ctx);
#endif /* else LINUX_VERSION_CODE < KERNEL_VERSION(2,6,35) */
    err = mdki_errno_unix_to_linux(err);
    mdki_linux_destroy_call_data(&cd);
    return err;
}
/*
 * Flush out the buffer cache
 *
 * Walks every vnode of the mount with the vnode iterator, fsyncing those
 * that look dirty.  Returns the last VOP_FSYNC error, or 0.
 */
int
smbfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp;
	struct vnode_iterator *marker;
	struct smbnode *np;
	int error, allerror = 0;

	vfs_vnode_iterator_init(mp, &marker);
	while (vfs_vnode_iterator_next(marker, &vp)) {
		error = vn_lock(vp, LK_EXCLUSIVE);
		if (error) {
			/* Lock failed: drop the iterator's reference. */
			vrele(vp);
			continue;
		}
		np = VTOSMB(vp);
		if (np == NULL) {
			vput(vp);
			continue;
		}
		/*
		 * Skip vnodes with nothing to write: NMODIFIED clear (or
		 * dead vnode), no dirty buffers and no resident pages.
		 */
		if ((vp->v_type == VNON || (np->n_flag & NMODIFIED) == 0) &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    vp->v_uobj.uo_npages == 0) {
			vput(vp);
			continue;
		}
		error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
		if (error)
			allerror = error;	/* remember, keep going */
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return (allerror);
}
/*
 * Start an I/O against a file-backed vdev.  Cache-flush ioctls are serviced
 * inline with VOP_FSYNC and the pipeline continues; reads/writes are handed
 * to a taskq and the pipeline stops until the strategy routine completes.
 */
static int
vdev_file_io_start(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			/* Write-cache flush maps to fsync of the backing file. */
			zio->io_error = VOP_FSYNC(vf->vf_vnode,
			    FSYNC | FDSYNC, kcred, NULL);
			break;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}

	/* NOTE(review): dispatched on the ZIO_TYPE_FREE taskq — confirm intended. */
	spa_taskq_dispatch_ent(spa, ZIO_TYPE_FREE, ZIO_TASKQ_ISSUE,
	    vdev_file_io_strategy, zio, 0, &zio->io_tqent);

	return (ZIO_PIPELINE_STOP);
}
/*
 * Close a kernel file handle: optionally fsync written data, close the
 * vnode, release it and free the handle.  Returns the VOP_CLOSE result.
 */
static int
kfclose(kfile_t *fp)
{
	int ret;

	KFDEBUG((CE_CONT, "close: %s\n", fp->kf_fname));

	/* Sync written data first (kf_state == 0 presumably means no prior
	 * error — confirm). */
	if ((fp->kf_vnflags & FWRITE) && fp->kf_state == 0) {
		ret = VOP_FSYNC(fp->kf_vp, FSYNC, kcred);
		if (ret != 0) {
			KFIOERR((CE_CONT, "%s: sync error %d\n",
			    fp->kf_fname, ret));
		}
		/* NOTE(review): "sync ok" is logged even when fsync failed. */
		KFDEBUG((CE_CONT, "%s: sync ok\n", fp->kf_fname));
	}

	ret = VOP_CLOSE(fp->kf_vp, fp->kf_vnflags, 1, (offset_t)0, kcred);
	if (fp->kf_state == 0) {
		if (ret != 0) {
			KFIOERR((CE_CONT, "%s: close error %d\n",
			    fp->kf_fname, ret));
		} else {
			KFDEBUG((CE_CONT, "%s: close ok\n", fp->kf_fname));
		}
	}

	VN_RELE(fp->kf_vp);
	kmem_free(fp, sizeof (kfile_t));
	return (ret);
}
int sysvbfs_close(void *arg) { struct vop_close_args /* { struct vnodeop_desc *a_desc; struct vnode *a_vp; int a_fflag; kauth_cred_t a_cred; } */ *a = arg; struct vnode *v = a->a_vp; struct sysvbfs_node *bnode = v->v_data; struct bfs_fileattr attr; DPRINTF("%s:\n", __func__); if (v->v_mount->mnt_flag & MNT_RDONLY) goto out; uvm_vnp_setsize(v, bnode->size); memset(&attr, 0xff, sizeof attr); /* Set VNOVAL all */ if (bnode->update_atime) attr.atime = time_second; if (bnode->update_ctime) attr.ctime = time_second; if (bnode->update_mtime) attr.mtime = time_second; bfs_inode_set_attr(bnode->bmp->bfs, bnode->inode, &attr); VOP_FSYNC(a->a_vp, a->a_cred, FSYNC_WAIT, 0, 0); out: return 0; }
/* * Flush out all the files in a filesystem. */ int ffs_flushfiles(struct mount *mp, int flags, struct proc *p) { struct ufsmount *ump; int error; ump = VFSTOUFS(mp); if (mp->mnt_flag & MNT_QUOTA) { int i; if ((error = vflush(mp, NULLVP, SKIPSYSTEM|flags)) != 0) return (error); for (i = 0; i < MAXQUOTAS; i++) { if (ump->um_quotas[i] == NULLVP) continue; quotaoff(p, mp, i); } /* * Here we fall through to vflush again to ensure * that we have gotten rid of all the system vnodes. */ } /* * Flush all the files. */ if ((error = vflush(mp, NULL, flags)) != 0) return (error); /* * Flush filesystem metadata. */ vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p); error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT, p); VOP_UNLOCK(ump->um_devvp, 0, p); return (error); }
/*
 * Start an I/O against a file-backed vdev (taskq variant).  Cache-flush
 * ioctls run inline via VOP_FSYNC; reads/writes are dispatched to
 * vdev_file_taskq and the pipeline stops until the strategy completes.
 */
static int
vdev_file_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			/* Write-cache flush maps to fsync of the backing file. */
			zio->io_error = VOP_FSYNC(vf->vf_vnode,
			    FSYNC | FDSYNC, kcred, NULL);
			break;
		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		return (ZIO_PIPELINE_CONTINUE);
	}

	/* The dispatch must not fail; VERIFY crashes rather than losing the zio. */
	VERIFY3U(taskq_dispatch(vdev_file_taskq, vdev_file_io_strategy, zio,
	    TQ_PUSHPAGE), !=, 0);

	return (ZIO_PIPELINE_STOP);
}
int msdosfs_sync_vnode(struct vnode *vp, void *arg) { struct msdosfs_sync_arg *msa = arg; int error; struct denode *dep; dep = VTODE(vp); if (vp->v_type == VNON || ((dep->de_flag & (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 && LIST_EMPTY(&vp->v_dirtyblkhd)) || msa->waitfor == MNT_LAZY) { return (0); } if (vget(vp, LK_EXCLUSIVE | LK_NOWAIT, msa->p)) return (0); if ((error = VOP_FSYNC(vp, msa->cred, msa->waitfor, msa->p)) != 0) msa->allerror = error; VOP_UNLOCK(vp, 0, msa->p); vrele(vp); return (0); }
/*
 * Close a mounted file descriptor.
 * Remove any locks and apply the VOP_CLOSE operation to the vnode for
 * the file descriptor.
 *
 * On the last close (count == 1) the underlying file is fsynced and the
 * namenode's reference to it dropped.  Returns the VOP_CLOSE result.
 */
static int
nm_close(vnode_t *vp, int flag, int count, offset_t offset, cred_t *crp,
    caller_context_t *ct)
{
	struct namenode *nodep = VTONM(vp);
	int error = 0;

	(void) cleanlocks(vp, ttoproc(curthread)->p_pid, 0);
	cleanshares(vp, ttoproc(curthread)->p_pid);
	error = VOP_CLOSE(nodep->nm_filevp, flag, count, offset, crp, ct);
	if (count == 1) {
		/* Best-effort flush; its result does not affect the return. */
		(void) VOP_FSYNC(nodep->nm_filevp, FSYNC, crp, ct);
		/*
		 * Before VN_RELE() we need to remove the vnode from
		 * the hash table.  We should only do so in the NMNMNT case.
		 * In other cases, nodep->nm_filep keeps a reference
		 * to nm_filevp and the entry in the hash table doesn't
		 * hurt.
		 */
		if ((nodep->nm_flag & NMNMNT) != 0) {
			mutex_enter(&ntable_lock);
			nameremove(nodep);
			mutex_exit(&ntable_lock);
		}
		VN_RELE(nodep->nm_filevp);
	}
	return (error);
}
/*
 * Called on the *last* close().
 *
 * This function should attempt to avoid returning errors, as handling
 * them usefully is often not possible.
 */
static int
sfs_close(struct vnode *v)
{
	int result;

	/* All close has to do is push any dirty state to disk. */
	result = VOP_FSYNC(v);

	return result;
}
/*
 * Sync the nwfs mount: fsync every unlocked, dirty vnode.  The mount's
 * vnode list can change underneath us, so the scan restarts from the top
 * whenever a vnode migrates off this mount or vget() fails.
 */
/* ARGSUSED */
static int
nwfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp;
	int error, allerror = 0;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_nvnodelist); vp != NULL;
	    vp = TAILQ_NEXT(vp, v_nmntvnodes)) {
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp)
			goto loop;
		/* Skip locked or clean vnodes, and everything on lazy syncs. */
		if (vn_islocked(vp) || RB_EMPTY(&vp->v_rbdirty_tree) ||
		    (waitfor & MNT_LAZY))
			continue;
		if (vget(vp, LK_EXCLUSIVE))
			goto loop; /* XXX vp may not be retained */
		error = VOP_FSYNC(vp, waitfor, 0);
		if (error)
			allerror = error;	/* remember, keep going */
		vput(vp);
	}
	return (allerror);
}
/*
 * Sync the msdosfs mount (FreeBSD variant): write back every modified
 * denode.
 *
 * NOTE(review): this chunk is truncated — the function continues past the
 * vnode loop shown here (device flush and return are not visible).
 */
static int
msdosfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *nvp;
	struct thread *td;
	struct denode *dep;
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	int error, allerror = 0;

	td = curthread;

	/*
	 * If we ever switch to not updating all of the fats all the time,
	 * this would be the place to update them from the first one.
	 */
	if (pmp->pm_fmod != 0) {
		if (pmp->pm_flags & MSDOSFSMNT_RONLY)
			panic("msdosfs_sync: rofs mod");
		else { /* update fats here */ }
	}
	/*
	 * Write back each (modified) denode.
	 */
	MNT_ILOCK(mp);
loop:
	MNT_VNODE_FOREACH(vp, mp, nvp) {
		VI_LOCK(vp);
		if (vp->v_type == VNON || (vp->v_iflag & VI_DOOMED)) {
			VI_UNLOCK(vp);
			continue;
		}
		MNT_IUNLOCK(mp);
		dep = VTODE(vp);
		/* Skip denodes with no flag changes and no (or lazy) dirty bufs. */
		if ((dep->de_flag &
		    (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0 &&
		    (vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY)) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK, td);
		if (error) {
			MNT_ILOCK(mp);
			if (error == ENOENT)
				goto loop;	/* vnode recycled: restart scan */
			continue;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
/*
 * Flush out the buffer cache
 *
 * Walks the mount's vnode list using a marker vnode so the scan survives
 * list mutation while locks are dropped.  Returns the last fsync error.
 */
int
smbfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp, *mvp;
	struct smbnode *np;
	int error, allerror = 0;

	/* Allocate a marker vnode. */
	if ((mvp = vnalloc(mp)) == NULL)
		return ENOMEM;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	mutex_enter(&mntvnode_lock);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		mutex_enter(&vp->v_interlock);
		np = VTOSMB(vp);
		if (np == NULL) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		/* Skip vnodes with no modifications, dirty bufs, or pages. */
		if ((vp->v_type == VNON || (np->n_flag & NMODIFIED) == 0) &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    vp->v_uobj.uo_npages == 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			mutex_enter(&mntvnode_lock);
			if (error == ENOENT) {
				/* Vnode disappeared: drop marker, rescan. */
				(void)vunmark(mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
		if (error)
			allerror = error;	/* remember, keep going */
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	return (allerror);
}
/*
 * Start an I/O against a file-backed vdev (synchronous variant).  The
 * backing vnode is revalidated with vnode_getwithvid() before every use so
 * a recycled vnode is never touched; I/O completion is signalled through
 * zio_interrupt() on every path.
 */
static void
vdev_file_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;
	ssize_t resid = 0;

	if (zio->io_type == ZIO_TYPE_IOCTL) {

		if (!vdev_readable(vd)) {
			zio->io_error = SET_ERROR(ENXIO);
			zio_interrupt(zio);
			return;
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			/* Only fsync if the vnode is still the one we opened. */
			if (!vnode_getwithvid(vf->vf_vnode, vf->vf_vid)) {
				zio->io_error = VOP_FSYNC(vf->vf_vnode,
				    FSYNC | FDSYNC, kcred, NULL);
				vnode_put(vf->vf_vnode);
			}
			break;
		default:
			zio->io_error = SET_ERROR(ENOTSUP);
		}

		zio_interrupt(zio);
		return;
	}

	ASSERT(zio->io_type == ZIO_TYPE_READ || zio->io_type == ZIO_TYPE_WRITE);

	if (!vnode_getwithvid(vf->vf_vnode, vf->vf_vid)) {
		/* VERIFY3U(taskq_dispatch(vdev_file_taskq,
		   vdev_file_io_strategy, zio, TQ_PUSHPAGE), !=, 0); */
		/* Taskq dispatch (above) replaced by direct, synchronous I/O. */
		zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
		    UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data,
		    zio->io_size, zio->io_offset, UIO_SYSSPACE, 0,
		    RLIM64_INFINITY, kcred, &resid);
		vnode_put(vf->vf_vnode);
	}

	/* A short transfer with no reported error means out of space. */
	if (resid != 0 && zio->io_error == 0)
		zio->io_error = SET_ERROR(ENOSPC);

	zio_interrupt(zio);
	return;
}
/*
 * Flush a buffered File: write out any pending bytes, then fsync the
 * backing vnode.  Returns 0 on success or the first error encountered.
 */
static int
in_fflush(File *fp)
{
	int err = 0;

	if (fp->count != 0)
		err = in_write(fp->vp, &fp->voffset, fp->buf, fp->count);

	/* Only force to stable storage if the write-back succeeded. */
	if (err == 0)
		err = VOP_FSYNC(fp->vp, FSYNC, CRED(), NULL);

	return (err);
}
/*
 * Write the packed nvlist configuration to the cachefile at dp->scd_path,
 * or remove the cachefile when nvl is NULL.  The write-temp/fsync/rename
 * sequence guarantees the cachefile is always either the complete old or
 * the complete new version, never a torn write.
 */
static int
spa_config_write(spa_config_dirent_t *dp, nvlist_t *nvl)
{
	size_t buflen;
	char *buf;
	vnode_t *vp;
	int oflags = FWRITE | FTRUNC | FCREAT | FOFFMAX;
	char *temp;
	int err;

	/*
	 * If the nvlist is empty (NULL), then remove the old cachefile.
	 */
	if (nvl == NULL) {
		err = vn_remove(dp->scd_path, UIO_SYSSPACE, RMFILE);
		return (err);
	}

	/*
	 * Pack the configuration into a buffer.
	 */
	VERIFY(nvlist_size(nvl, &buflen, NV_ENCODE_XDR) == 0);

	buf = kmem_alloc(buflen, KM_SLEEP);
	temp = kmem_zalloc(MAXPATHLEN, KM_SLEEP);

	VERIFY(nvlist_pack(nvl, &buf, &buflen, NV_ENCODE_XDR,
	    KM_SLEEP) == 0);

	/*
	 * Write the configuration to disk.  We need to do the traditional
	 * 'write to temporary file, sync, move over original' to make sure we
	 * always have a consistent view of the data.
	 */
	(void) snprintf(temp, MAXPATHLEN, "%s.tmp", dp->scd_path);

	err = vn_open(temp, UIO_SYSSPACE, oflags, 0644, &vp, CRCREAT, 0);
	if (err == 0) {
		err = vn_rdwr(UIO_WRITE, vp, buf, buflen, 0, UIO_SYSSPACE,
		    0, RLIM64_INFINITY, kcred, NULL);
		if (err == 0)
			err = VOP_FSYNC(vp, FSYNC, kcred, NULL);
		if (err == 0)
			err = vn_rename(temp, dp->scd_path, UIO_SYSSPACE);
		(void) VOP_CLOSE(vp, oflags, 1, 0, kcred, NULL);
	}

	/* Best effort: after a successful rename the temp file is gone. */
	(void) vn_remove(temp, UIO_SYSSPACE, RMFILE);

	kmem_free(buf, buflen);
	kmem_free(temp, MAXPATHLEN);

	return (err);
}
/*
 * Since this file system has no disk blocks of its own, apply
 * the VOP_FSYNC operation on the mounted file descriptor.
 */
static int
nm_sync(vfs_t *vfsp, short flag, cred_t *crp)
{
	struct namenode *np;
	int rv;

	if (vfsp == NULL) {
		rv = 0;
	} else {
		np = (struct namenode *)vfsp->vfs_data;
		/* SYNC_CLOSE tears down the namefs mounts instead of syncing. */
		rv = (flag & SYNC_CLOSE) ?
		    nm_umountall(np->nm_filevp, crp) :
		    VOP_FSYNC(np->nm_filevp, FSYNC, crp, NULL);
	}
	return (rv);
}
/*
 * union_fsync(struct vnode *a_vp, struct ucred *a_cred, int a_waitfor,
 *	       struct thread *a_td)
 *
 * Forward the fsync to whichever underlying vnode the union node exposes;
 * if neither layer can be locked there is nothing to sync.
 */
static int
union_fsync(struct vop_fsync_args *ap)
{
	struct thread *td = ap->a_td;
	struct union_node *un = VTOUNION(ap->a_vp);
	struct vnode *other;
	int rv = 0;

	other = union_lock_other(un, td);
	if (other != NULLVP) {
		rv = VOP_FSYNC(other, ap->a_waitfor, 0);
		union_unlock_other(other, td);
	}

	return (rv);
}
/*
 * Rump-kernel wrapper for VOP_FSYNC: schedule the rump kernel around the
 * real vnode operation and return its result unchanged.
 */
int
RUMP_VOP_FSYNC(struct vnode *vp, struct kauth_cred *cred, int flags,
    off_t offlo, off_t offhi)
{
	int rv;

	rump_schedule();
	rv = VOP_FSYNC(vp, cred, flags, offlo, offhi);
	rump_unschedule();

	return rv;
}
STATIC int xfs_file_fsync( struct file *filp, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; vnode_t *vp = vn_from_inode(inode); int error; int flags = FSYNC_WAIT; if (datasync) flags |= FSYNC_DATA; VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error); return -error; }
/*
 * Start an I/O against a file-backed vdev (buf variant).  Cache-flush
 * ioctls are serviced inline with VOP_FSYNC; reads/writes are wrapped in a
 * buf and dispatched to the spa's ZIO taskq, stopping the pipeline until
 * vdev_file_io_intr fires.
 */
static int
vdev_file_io_start(zio_t *zio)
{
	spa_t *spa = zio->io_spa;
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;
	vdev_buf_t *vb;
	buf_t *bp;

	if (zio->io_type == ZIO_TYPE_IOCTL) {
		/* XXPOLICY */
		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			/* Write-cache flush maps to fsync of the backing file. */
			zio->io_error = VOP_FSYNC(vf->vf_vnode,
			    FSYNC | FDSYNC, kcred, NULL);
			break;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}

	vb = kmem_alloc(sizeof (vdev_buf_t), KM_SLEEP);

	vb->vb_io = zio;
	bp = &vb->vb_buf;

	/* Describe the transfer for the strategy routine. */
	bioinit(bp);
	bp->b_flags = (zio->io_type == ZIO_TYPE_READ ? B_READ : B_WRITE);
	bp->b_bcount = zio->io_size;
	bp->b_un.b_addr = zio->io_data;
	bp->b_lblkno = lbtodb(zio->io_offset);
	bp->b_bufsize = zio->io_size;
	bp->b_private = vf->vf_vnode;
	bp->b_iodone = (int (*)())vdev_file_io_intr;

	taskq_dispatch_ent(spa->spa_zio_taskq[ZIO_TYPE_FREE][ZIO_TASKQ_ISSUE],
	    vdev_file_io_strategy, bp, 0, &zio->io_tqent);

	return (ZIO_PIPELINE_STOP);
}
/*
 * Sync the NFS mount: fsync every vnode with dirty buffers.
 *
 * NOTE(review): this chunk is truncated — the function continues past the
 * vnode loop shown here (final unlock and return are not visible).
 */
/* ARGSUSED */
static int
nfs_sync(struct mount *mp, int waitfor)
{
	struct vnode *vp, *mvp;
	struct thread *td;
	int error, allerror = 0;

	td = curthread;

	MNT_ILOCK(mp);
	/*
	 * If a forced dismount is in progress, return from here so that
	 * the umount(2) syscall doesn't get stuck in VFS_SYNC() before
	 * calling VFS_UNMOUNT().
	 */
	if ((mp->mnt_kern_flag & MNTK_UNMOUNTF) != 0) {
		MNT_IUNLOCK(mp);
		return (EBADF);
	}

	/*
	 * Force stale buffer cache information to be flushed.
	 */
loop:
	MNT_VNODE_FOREACH(vp, mp, mvp) {
		VI_LOCK(vp);
		MNT_IUNLOCK(mp);
		/* XXX Racy bv_cnt check. */
		if (VOP_ISLOCKED(vp) || vp->v_bufobj.bo_dirty.bv_cnt == 0 ||
		    waitfor == MNT_LAZY) {
			VI_UNLOCK(vp);
			MNT_ILOCK(mp);
			continue;
		}
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK, td)) {
			MNT_ILOCK(mp);
			/* Iteration invalidated: abort marker and restart. */
			MNT_VNODE_FOREACH_ABORT_ILOCKED(mp, mvp);
			goto loop;
		}
		error = VOP_FSYNC(vp, waitfor, td);
		if (error)
			allerror = error;
		VOP_UNLOCK(vp, 0);
		vrele(vp);
		MNT_ILOCK(mp);
	}
/* * Go through the disk queues to initiate sandbagged IO; * go through the inodes to write those that have been modified; * initiate the writing of the super block if it has been modified. * * Should always be called with the mount point locked. */ int ext2fs_sync(struct mount *mp, int waitfor, struct ucred *cred, struct proc *p) { struct ufsmount *ump = VFSTOUFS(mp); struct m_ext2fs *fs; int error, allerror = 0; struct ext2fs_sync_args esa; fs = ump->um_e2fs; if (fs->e2fs_ronly != 0) { /* XXX */ printf("fs = %s\n", fs->e2fs_fsmnt); panic("update: rofs mod"); } /* * Write back each (modified) inode. */ esa.p = p; esa.cred = cred; esa.allerror = 0; esa.waitfor = waitfor; vfs_mount_foreach_vnode(mp, ext2fs_sync_vnode, &esa); if (esa.allerror != 0) allerror = esa.allerror; /* * Force stale file system control information to be flushed. */ if (waitfor != MNT_LAZY) { vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p); if ((error = VOP_FSYNC(ump->um_devvp, cred, waitfor)) != 0) allerror = error; VOP_UNLOCK(ump->um_devvp, 0); } /* * Write back modified superblock. */ if (fs->e2fs_fmod != 0) { fs->e2fs_fmod = 0; fs->e2fs.e2fs_wtime = time_second; if ((error = ext2fs_cgupdate(ump, waitfor))) allerror = error; } return (allerror); }
STATIC int linvfs_fsync( struct file *filp, struct dentry *dentry, int datasync) { struct inode *inode = dentry->d_inode; vnode_t *vp = LINVFS_GET_VP(inode); int error; int flags = FSYNC_WAIT; if (datasync) flags |= FSYNC_DATA; ASSERT(vp); VOP_FSYNC(vp, flags, NULL, (xfs_off_t)0, (xfs_off_t)-1, error); return -error; }
/*
 * Fsync a unionfs vnode by forwarding to the active underlying vnode:
 * the upper layer when it has been opened, otherwise the lower layer.
 * Returns EBADF when neither layer is available.
 */
static int
unionfs_fsync(void *v)
{
	struct vop_fsync_args *ap = v;
	struct unionfs_node *unp = VTOUNIONFS(ap->a_vp);
	struct unionfs_node_status *unsp;
	struct vnode *tvp;

	unionfs_get_node_status(unp, &unsp);
	tvp = unsp->uns_upper_opencnt ? unp->un_uppervp : unp->un_lowervp;
	unionfs_tryrem_node_status(unp, unsp);

	if (tvp == NULLVP)
		return (EBADF);

	return (VOP_FSYNC(tvp, ap->a_cred, ap->a_flags, ap->a_offlo,
	    ap->a_offhi));
}
/*
 * Sync a v7fs mount: write back the superblock, then fsync every allocated
 * node.  The whole node scan is retried once if any node failed the first
 * pass.  Returns 0 or the last per-node error from the final pass.
 */
int
v7fs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct v7fs_mount *v7fsmount = mp->mnt_data;
	struct v7fs_self *fs = v7fsmount->core;
	struct v7fs_node *v7fs_node;
	struct v7fs_inode *inode;
	struct vnode *v;
	int err, error;
	int retry_cnt;

	DPRINTF("\n");

	v7fs_superblock_writeback(fs);
	for (retry_cnt = 0; retry_cnt < 2; retry_cnt++) {
		error = 0;

		mutex_enter(&mntvnode_lock);
		for (v7fs_node = LIST_FIRST(&v7fsmount->v7fs_node_head);
		    v7fs_node != NULL;
		    v7fs_node = LIST_NEXT(v7fs_node, link)) {
			inode = &v7fs_node->inode;
			if (!v7fs_inode_allocated(inode)) {
				continue;
			}
			v = v7fs_node->vnode;
			/* Take the vnode interlock before dropping the list lock. */
			mutex_enter(v->v_interlock);
			mutex_exit(&mntvnode_lock);
			err = vget(v, LK_EXCLUSIVE | LK_NOWAIT);
			if (err == 0) {
				err = VOP_FSYNC(v, cred, FSYNC_WAIT, 0, 0);
				vput(v);
			}
			if (err != 0)
				error = err;	/* remember, keep going */
			mutex_enter(&mntvnode_lock);
		}
		mutex_exit(&mntvnode_lock);

		if (error == 0)
			break;	/* clean pass: no retry needed */
	}

	return error;
}
/*
 * Start an I/O against a file-backed vdev (synchronous, pipeline-return
 * variant).  Cache-flush ioctls map to VOP_FSYNC; reads/writes are done
 * inline with vn_rdwr().
 */
static int
vdev_file_io_start(zio_t *zio)
{
	vdev_t *vd = zio->io_vd;
	vdev_file_t *vf = vd->vdev_tsd;
	ssize_t resid = 0;

	if (zio->io_type == ZIO_TYPE_IOCTL) {

		if (!vdev_readable(vd)) {
			zio->io_error = ENXIO;
			return (ZIO_PIPELINE_CONTINUE);
		}

		switch (zio->io_cmd) {
		case DKIOCFLUSHWRITECACHE:
			/*
			 * NOTE(review): the vnode_getwithvid() result is
			 * ignored here; the sibling implementation skips the
			 * I/O when the vnode was recycled — confirm this is
			 * safe.
			 */
			vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
			zio->io_error = VOP_FSYNC(vf->vf_vnode,
			    FSYNC | FDSYNC, kcred, NULL);
			vnode_put(vf->vf_vnode);
			break;
		default:
			zio->io_error = ENOTSUP;
		}

		return (ZIO_PIPELINE_CONTINUE);
	}

	/* NOTE(review): return value unchecked here as well — see above. */
	vnode_getwithvid(vf->vf_vnode, vf->vf_vid);
	zio->io_error = vn_rdwr(zio->io_type == ZIO_TYPE_READ ?
	    UIO_READ : UIO_WRITE, vf->vf_vnode, zio->io_data, zio->io_size,
	    zio->io_offset, UIO_SYSSPACE, 0, RLIM64_INFINITY, kcred, &resid);
	vnode_put(vf->vf_vnode);

	/* A short transfer with no reported error means out of space. */
	if (resid != 0 && zio->io_error == 0)
		zio->io_error = ENOSPC;

	zio_interrupt(zio);

	return (ZIO_PIPELINE_STOP);
}
/* * Flush out all the files in a filesystem. */ int ext2fs_flushfiles(struct mount *mp, int flags, struct proc *p) { struct ufsmount *ump; int error; ump = VFSTOUFS(mp); /* * Flush all the files. */ if ((error = vflush(mp, NULL, flags)) != 0) return (error); /* * Flush filesystem metadata. */ vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY, p); error = VOP_FSYNC(ump->um_devvp, p->p_ucred, MNT_WAIT); VOP_UNLOCK(ump->um_devvp, 0); return (error); }