/*
 * Flush out the buffer cache.
 *
 * Walks every vnode on the mount's vnode list (using a marker vnode so
 * the list position survives dropping mntvnode_lock) and VOP_FSYNC()s
 * each vnode that looks dirty.  Returns 0 or the last fsync/allocation
 * error seen; individual vnode errors do not abort the scan.
 */
int
smbfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp, *mvp;
	struct smbnode *np;
	int error, allerror = 0;

	/* Allocate a marker vnode to hold our place in mnt_vnodelist. */
	if ((mvp = vnalloc(mp)) == NULL)
		return ENOMEM;
	/*
	 * Force stale buffer cache information to be flushed.
	 */
	mutex_enter(&mntvnode_lock);
loop:
	/* Advance via vunmark(): the marker keeps the scan valid while the
	 * list lock is dropped around vget()/VOP_FSYNC(). */
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		/*
		 * If the vnode that we are about to sync is no longer
		 * associated with this mount point, start over.
		 */
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		mutex_enter(&vp->v_interlock);
		np = VTOSMB(vp);
		if (np == NULL) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		/* Skip clean vnodes: not modified, no dirty bufs, no pages. */
		if ((vp->v_type == VNON ||
		    (np->n_flag & NMODIFIED) == 0) &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    vp->v_uobj.uo_npages == 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		/* Drop the list lock before blocking in vget()/VOP_FSYNC(). */
		mutex_exit(&mntvnode_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			mutex_enter(&mntvnode_lock);
			if (error == ENOENT) {
				/* Vnode was reclaimed under us: restart scan. */
				(void)vunmark(mvp);
				goto loop;
			}
			continue;
		}
		error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
		if (error)
			allerror = error;	/* remember, but keep going */
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	return (allerror);
}
/*
 * livecar - do we need to follow the car?
 *
 * GC left-scan helper: classifies node n by type.  Returns TRUE only for
 * SYM/LIST nodes whose car is non-NIL (i.e. the scan must descend);
 * returns FALSE for leaf-like node types.  OBJ/VECT nodes are marked as
 * a side effect before returning FALSE.  Unknown node types are fatal.
 */
LOCAL int
livecar(NODE *n)
{
	switch (ntype(n)) {
	case OBJ:
	case VECT:
		vmark(n);
		/* FALLTHROUGH -- marked nodes are still leaves for this scan */
	case SUBR:
	case FSUBR:
	case INT:
	case FLOAT:
	case STR:
	case FPTR:
		return (FALSE);
	case SYM:
	case LIST:
		/* Follow the car only if there is one. */
		return (car(n) != NIL);
	default:
		/* Corrupt heap: report and abort the interpreter. */
		printf("bad node type (%d) found during left scan\n", ntype(n));
		osfinish();
		exit(1);
	}
	/*NOTREACHED*/
}
/*
 * Q_SYNC - sync quota files to disk.
 *
 * Scans all vnodes on the mount (marker-vnode walk) and calls dqsync()
 * on every attached dquot that has DQ_MOD set.  Always returns 0 once
 * the marker vnode has been allocated; per-vnode vget() failures other
 * than ENOENT simply skip that vnode.
 */
int
qsync(struct mount *mp)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, *mvp;
	struct dquot *dq;
	int i, error;

	/*
	 * Check if the mount point has any quotas.
	 * If not, simply return.
	 */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	if (i == MAXQUOTAS)
		return (0);
	/* Allocate a marker vnode to keep our place across lock drops. */
	if ((mvp = vnalloc(mp)) == NULL)
		return (ENOMEM);
	/*
	 * Search vnodes associated with this mount point,
	 * synchronizing any modified dquot structures.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(&vp->v_interlock);
		/* Skip vnodes with no inode, foreign/marker vnodes, and
		 * vnodes being reclaimed. */
		if (VTOI(vp) == NULL || vp->v_mount != mp ||
		    vismarker(vp) || vp->v_type == VNON ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		/* Drop the list lock before the potentially-blocking vget(). */
		mutex_exit(&mntvnode_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT | LK_INTERLOCK);
		if (error) {
			mutex_enter(&mntvnode_lock);
			if (error == ENOENT) {
				/* Vnode reclaimed: restart the scan. */
				(void)vunmark(mvp);
				goto again;
			}
			continue;
		}
		/* Push every modified dquot attached to this inode. */
		for (i = 0; i < MAXQUOTAS; i++) {
			dq = VTOI(vp)->i_dquot[i];
			if (dq == NODQUOT)
				continue;
			mutex_enter(&dq->dq_interlock);
			if (dq->dq_flags & DQ_MOD)
				dqsync(vp, dq);
			mutex_exit(&dq->dq_interlock);
		}
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	return (0);
}
/*
 * Q_QUOTAOFF - turn off disk quotas for a filesystem.
 *
 * Serializes with concurrent quotaon/quotaoff via the QTF_CLOSING /
 * QTF_OPENING flags under dqlock, detaches the dquot of the given type
 * from every vnode on the mount, then closes the quota file vnode and
 * releases the saved credential.  Clears MNT_QUOTA when the last quota
 * type is disabled.  Returns 0, ENOMEM, or the vn_close() error.
 */
int
quotaoff(struct lwp *l, struct mount *mp, int type)
{
	struct vnode *vp;
	struct vnode *qvp, *mvp;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct dquot *dq;
	struct inode *ip;
	kauth_cred_t cred;
	int i, error;

	/* Allocate a marker vnode for the vnode-list walk below. */
	if ((mvp = vnalloc(mp)) == NULL)
		return ENOMEM;
	/* Wait out any in-progress quotaon/quotaoff, then claim CLOSING. */
	mutex_enter(&dqlock);
	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&dqcv, &dqlock);
	if ((qvp = ump->um_quotas[type]) == NULLVP) {
		/* Quotas of this type are not on: nothing to do. */
		mutex_exit(&dqlock);
		vnfree(mvp);
		return (0);
	}
	ump->um_qflags[type] |= QTF_CLOSING;
	mutex_exit(&dqlock);
	/*
	 * Search vnodes associated with this mount point,
	 * deleting any references to quota file being closed.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(&vp->v_interlock);
		/* Skip inode-less, foreign, marker and dying vnodes. */
		if (VTOI(vp) == NULL || vp->v_mount != mp ||
		    vismarker(vp) || vp->v_type == VNON ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		/* No LK_NOWAIT here: any vget() failure restarts the scan. */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto again;
		}
		/* Detach and release this vnode's dquot of the given type. */
		ip = VTOI(vp);
		dq = ip->i_dquot[type];
		ip->i_dquot[type] = NODQUOT;
		dqrele(vp, dq);
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
#ifdef DIAGNOSTIC
	dqflush(qvp);
#endif
	/* Quota file is an ordinary file again; close it. */
	qvp->v_vflag &= ~VV_SYSTEM;
	error = vn_close(qvp, FREAD|FWRITE, l->l_cred);
	mutex_enter(&dqlock);
	ump->um_quotas[type] = NULLVP;
	cred = ump->um_cred[type];
	ump->um_cred[type] = NOCRED;
	/* Check whether any other quota type is still active. */
	for (i = 0; i < MAXQUOTAS; i++)
		if (ump->um_quotas[i] != NULLVP)
			break;
	ump->um_qflags[type] &= ~QTF_CLOSING;
	cv_broadcast(&dqcv);
	mutex_exit(&dqlock);
	kauth_cred_free(cred);
	if (i == MAXQUOTAS)
		mp->mnt_flag &= ~MNT_QUOTA;
	return (error);
}
/*
 * Q_QUOTAON - set up a quota file for a particular file system.
 *
 * Opens the user-supplied quota file `fname', installs it as the quota
 * vnode for `type', saves the caller's credential and default time
 * limits, and then walks all vnodes on the mount attaching dquots to
 * inodes that are open for writing.  On any error the operation is
 * unwound via quotaoff().  Returns 0 or an errno.
 */
int
quotaon(struct lwp *l, struct mount *mp, int type, void *fname)
{
	struct ufsmount *ump = VFSTOUFS(mp);
	struct vnode *vp, **vpp, *mvp;
	struct dquot *dq;
	int error;
	struct nameidata nd;

	/* XXX XXX XXX -- quotas and WAPBL journalling don't mix yet. */
	if (mp->mnt_wapbl != NULL) {
		printf("%s: quotas cannot yet be used with -o log\n",
		    mp->mnt_stat.f_mntonname);
		return (EOPNOTSUPP);
	}

	vpp = &ump->um_quotas[type];
	/* Look up and open the quota file in the caller's address space. */
	NDINIT(&nd, LOOKUP, FOLLOW, UIO_USERSPACE, fname);
	if ((error = vn_open(&nd, FREAD|FWRITE, 0)) != 0)
		return (error);
	vp = nd.ni_vp;
	VOP_UNLOCK(vp, 0);
	if (vp->v_type != VREG) {
		/* Quota file must be a regular file. */
		(void) vn_close(vp, FREAD|FWRITE, l->l_cred);
		return (EACCES);
	}
	/* Replacing a different quota file: turn the old one off first. */
	if (*vpp != vp)
		quotaoff(l, mp, type);
	/* Serialize with other quotaon/quotaoff and claim OPENING. */
	mutex_enter(&dqlock);
	while ((ump->um_qflags[type] & (QTF_CLOSING | QTF_OPENING)) != 0)
		cv_wait(&dqcv, &dqlock);
	ump->um_qflags[type] |= QTF_OPENING;
	mutex_exit(&dqlock);
	mp->mnt_flag |= MNT_QUOTA;
	vp->v_vflag |= VV_SYSTEM;	/* XXXSMP */
	*vpp = vp;
	/*
	 * Save the credential of the process that turned on quotas.
	 * Set up the time limits for this quota.
	 */
	kauth_cred_hold(l->l_cred);
	ump->um_cred[type] = l->l_cred;
	ump->um_btime[type] = MAX_DQ_TIME;
	ump->um_itime[type] = MAX_IQ_TIME;
	/* Dquot 0 in the quota file may override the default grace times. */
	if (dqget(NULLVP, 0, ump, type, &dq) == 0) {
		if (dq->dq_btime > 0)
			ump->um_btime[type] = dq->dq_btime;
		if (dq->dq_itime > 0)
			ump->um_itime[type] = dq->dq_itime;
		dqrele(NULLVP, dq);
	}
	/* Allocate a marker vnode for the vnode-list walk. */
	if ((mvp = vnalloc(mp)) == NULL) {
		error = ENOMEM;
		goto out;
	}
	/*
	 * Search vnodes associated with this mount point,
	 * adding references to quota file being opened.
	 * NB: only need to add dquot's for inodes being modified.
	 */
	mutex_enter(&mntvnode_lock);
again:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		mutex_enter(&vp->v_interlock);
		/* Skip inode-less, foreign, marker, dying vnodes and
		 * vnodes nobody has open for writing. */
		if (VTOI(vp) == NULL || vp->v_mount != mp ||
		    vismarker(vp) || vp->v_type == VNON ||
		    vp->v_writecount == 0 ||
		    (vp->v_iflag & (VI_XLOCK | VI_CLEAN)) != 0) {
			mutex_exit(&vp->v_interlock);
			continue;
		}
		mutex_exit(&mntvnode_lock);
		/* Any vget() failure restarts the scan from the marker. */
		if (vget(vp, LK_EXCLUSIVE | LK_INTERLOCK)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto again;
		}
		if ((error = getinoquota(VTOI(vp))) != 0) {
			/* Record the error and abandon the walk. */
			vput(vp);
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			break;
		}
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
 out:
	/* Release OPENING and wake waiters whether we succeeded or not. */
	mutex_enter(&dqlock);
	ump->um_qflags[type] &= ~QTF_OPENING;
	cv_broadcast(&dqcv);
	mutex_exit(&dqlock);
	if (error)
		quotaoff(l, mp, type);
	return (error);
}
/*
 * Sync an msdosfs mount: write back every modified denode, then flush
 * the underlying device vnode.  Returns 0 or the last VOP_FSYNC error.
 *
 * NOTE(review): unlike the other *_sync routines in this file, the
 * vnalloc() result is not checked for NULL here -- presumably vnalloc()
 * cannot fail in this code vintage; confirm against kern/vfs_subr.c.
 */
int
msdosfs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp, *mvp;
	struct denode *dep;
	struct msdosfsmount *pmp = VFSTOMSDOSFS(mp);
	int error, allerror = 0;

	/*
	 * If we ever switch to not updating all of the FATs all the time,
	 * this would be the place to update them from the first one.
	 */
	if (pmp->pm_fmod != 0) {
		if (pmp->pm_flags & MSDOSFSMNT_RONLY)
			panic("msdosfs_sync: rofs mod");
		else {
			/* update FATs here */
		}
	}
	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);
	fstrans_start(mp, FSTRANS_SHARED);
	/*
	 * Write back each (modified) denode.
	 */
	mutex_enter(&mntvnode_lock);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		mutex_enter(vp->v_interlock);
		dep = VTODE(vp);
		/* Skip lazy syncs and vnodes with nothing dirty: no
		 * timestamp/modify flags, no dirty bufs, no dirty pages. */
		if (waitfor == MNT_LAZY || vp->v_type == VNON ||
		    dep == NULL ||
		    (((dep->de_flag &
		    (DE_ACCESS | DE_CREATE | DE_UPDATE | DE_MODIFIED)) == 0) &&
		    (LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    UVM_OBJ_IS_CLEAN(&vp->v_uobj)))) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		/* Drop the list lock before blocking; the interlock is
		 * still held and handed over to vget(). */
		mutex_exit(&mntvnode_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (error) {
			mutex_enter(&mntvnode_lock);
			if (error == ENOENT) {
				/* Vnode reclaimed under us: restart. */
				(void)vunmark(mvp);
				goto loop;
			}
			continue;
		}
		if ((error = VOP_FSYNC(vp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
			allerror = error;	/* remember, keep scanning */
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if ((error = VOP_FSYNC(pmp->pm_devvp, cred,
	    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
		allerror = error;
	fstrans_done(mp);
	return (allerror);
}
/*
 * Go through the disk queues to initiate sandbagged IO;
 * go through the inodes to write those that have been modified;
 * initiate the writing of the super block if it has been modified.
 *
 * Note: we are always called with the filesystem marked `MPBUSY'.
 *
 * Returns 0 or the last error from VOP_FSYNC()/ext2fs_update()/
 * ext2fs_cgupdate(); individual vnode errors do not abort the scan.
 */
int
ext2fs_sync(struct mount *mp, int waitfor, kauth_cred_t cred)
{
	struct vnode *vp, *mvp;
	struct inode *ip;
	struct ufsmount *ump = VFSTOUFS(mp);
	struct m_ext2fs *fs;
	int error, allerror = 0;

	fs = ump->um_e2fs;
	if (fs->e2fs_fmod != 0 && fs->e2fs_ronly != 0) {	/* XXX */
		printf("fs = %s\n", fs->e2fs_fsmnt);
		panic("update: rofs mod");
	}

	/* Allocate a marker vnode to hold our place in mnt_vnodelist. */
	mvp = vnalloc(mp);

	/*
	 * Write back each (modified) inode.
	 */
	mutex_enter(&mntvnode_lock);
loop:
	/*
	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
	 * and vclean() can be called indirectly
	 */
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		mutex_enter(vp->v_interlock);
		ip = VTOI(vp);
		/* Skip clean vnodes: no inode flags set, no dirty bufs,
		 * no dirty pages; also skip vnodes being reclaimed. */
		if (ip == NULL || (vp->v_iflag & (VI_XLOCK|VI_CLEAN)) != 0 ||
		    vp->v_type == VNON ||
		    ((ip->i_flag & (IN_CHANGE | IN_UPDATE | IN_MODIFIED)) == 0 &&
		    LIST_EMPTY(&vp->v_dirtyblkhd) &&
		    UVM_OBJ_IS_CLEAN(&vp->v_uobj))) {
			mutex_exit(vp->v_interlock);
			continue;
		}
		/* Drop the list lock before blocking in vget()/fsync. */
		mutex_exit(&mntvnode_lock);
		error = vget(vp, LK_EXCLUSIVE | LK_NOWAIT);
		if (error) {
			mutex_enter(&mntvnode_lock);
			if (error == ENOENT) {
				/*
				 * Vnode was reclaimed: restart the scan.
				 * (Bug fix: the original code re-acquired
				 * mntvnode_lock a second time here although
				 * it was already taken just above, which
				 * double-locks a non-recursive kmutex.)
				 */
				(void)vunmark(mvp);
				goto loop;
			}
			continue;
		}
		if (vp->v_type == VREG && waitfor == MNT_LAZY)
			error = ext2fs_update(vp, NULL, NULL, 0);
		else
			error = VOP_FSYNC(vp, cred,
			    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0);
		if (error)
			allerror = error;	/* remember, keep scanning */
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	/*
	 * Force stale file system control information to be flushed.
	 */
	if (waitfor != MNT_LAZY) {
		vn_lock(ump->um_devvp, LK_EXCLUSIVE | LK_RETRY);
		if ((error = VOP_FSYNC(ump->um_devvp, cred,
		    waitfor == MNT_WAIT ? FSYNC_WAIT : 0, 0, 0)) != 0)
			allerror = error;
		VOP_UNLOCK(ump->um_devvp);
	}
	/*
	 * Write back modified superblock.
	 */
	if (fs->e2fs_fmod != 0) {
		fs->e2fs_fmod = 0;
		fs->e2fs.e2fs_wtime = time_second;
		if ((error = ext2fs_cgupdate(ump, waitfor)))
			allerror = error;
	}
	return (allerror);
}
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix). The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 * 1) invalidate all cached meta-data.
 * 2) re-read superblock from disk.
 * 3) re-read summary information from disk.
 * 4) invalidate all inactive vnodes.
 * 5) invalidate all cached file data.
 * 6) re-read inode data for all active vnodes.
 *
 * Returns 0, EINVAL if not mounted read-only, or a bread() error.
 */
int
ext2fs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *newfs;
	int i, error;
	void *cp;
	struct ufsmount *ump;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);

	ump = VFSTOUFS(mp);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = ump->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		panic("ext2fs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	/* NOTE(review): no brelse() on bread() error -- presumably bread()
	 * releases the buffer itself on failure in this vintage; confirm. */
	error = bread(devvp, SBLOCK, SBSIZE, NOCRED, 0, &bp);
	if (error) {
		return (error);
	}
	newfs = (struct ext2fs *)bp->b_data;
	error = ext2fs_checksb(newfs, (mp->mnt_flag & MNT_RDONLY) != 0);
	if (error) {
		brelse(bp, 0);
		return (error);
	}

	fs = ump->um_e2fs;
	/*
	 * copy in new superblock, and compute in-memory values
	 */
	e2fs_sbload(newfs, &fs->e2fs);
	fs->e2fs_ncg =
	    howmany(fs->e2fs.e2fs_bcount - fs->e2fs.e2fs_first_dblock,
	    fs->e2fs.e2fs_bpg);
	/* device-block shift: fs block size relative to DEV_BSIZE */
	fs->e2fs_fsbtodb = fs->e2fs.e2fs_log_bsize + LOG_MINBSIZE - DEV_BSHIFT;
	fs->e2fs_bsize = MINBSIZE << fs->e2fs.e2fs_log_bsize;
	fs->e2fs_bshift = LOG_MINBSIZE + fs->e2fs.e2fs_log_bsize;
	fs->e2fs_qbmask = fs->e2fs_bsize - 1;
	fs->e2fs_bmask = ~fs->e2fs_qbmask;
	fs->e2fs_ngdb =
	    howmany(fs->e2fs_ncg, fs->e2fs_bsize / sizeof(struct ext2_gd));
	fs->e2fs_ipb = fs->e2fs_bsize / EXT2_DINODE_SIZE(fs);
	fs->e2fs_itpg = fs->e2fs.e2fs_ipg / fs->e2fs_ipb;
	brelse(bp, 0);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	for (i = 0; i < fs->e2fs_ngdb; i++) {
		error = bread(devvp ,
		    EXT2_FSBTODB(fs, fs->e2fs.e2fs_first_dblock +
		    1 /* superblock */ + i),
		    fs->e2fs_bsize, NOCRED, 0, &bp);
		if (error) {
			return (error);
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &fs->e2fs_gd[i * fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    fs->e2fs_bsize);
		brelse(bp, 0);
	}

	/* Allocate a marker vnode for the vnode-list walk. */
	mvp = vnalloc(mp);
	/*
	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
	 * and vclean() can be called indirectly
	 */
	mutex_enter(&mntvnode_lock);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 */
		/* vrecycle() drops mntvnode_lock; on success restart. */
		if (vrecycle(vp, &mntvnode_lock)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto loop;
		}
		/*
		 * Step 5: invalidate all cached file data.
		 */
		mutex_enter(vp->v_interlock);
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, l, 0, 0))
			panic("ext2fs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp, EXT2_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
		if (error) {
			/* Abort the walk; `error' is returned below. */
			vput(vp);
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			break;
		}
		/* Copy the on-disk dinode over the in-core copy. */
		cp = (char *)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * EXT2_DINODE_SIZE(fs));
		e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
		ext2fs_set_inode_guid(ip);
		brelse(bp, 0);
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	return (error);
}