/*
 * VFS_ROOT() for fusefs: return (in *vpp) a referenced, locked vnode
 * for the root of the mount.
 *
 * The root vnode is cached in data->vroot.  The first caller obtains
 * it via fuse_vnode_get() and publishes it under FUSE_LOCK(); two
 * threads can race to do so, and the loser discards its vnode and
 * adopts the winner's cached copy.
 */
static int
fuse_vfsop_root(struct mount *mp, int lkflags, struct vnode **vpp)
{
	struct fuse_data *data = fuse_get_mpdata(mp);
	int err = 0;

	if (data->vroot != NULL) {
		/* Fast path: root already cached; take a locked reference. */
		err = vget(data->vroot, lkflags, curthread);
		if (err == 0)
			*vpp = data->vroot;
	} else {
		/* Slow path: look up FUSE_ROOT_ID and try to cache it. */
		err = fuse_vnode_get(mp, FUSE_ROOT_ID, NULL, vpp, NULL, VDIR);
		if (err == 0) {
			FUSE_LOCK();
			MPASS(data->vroot == NULL || data->vroot == *vpp);
			if (data->vroot == NULL) {
				FS_DEBUG("new root vnode\n");
				data->vroot = *vpp;
				FUSE_UNLOCK();
				/* Extra reference held by the cache itself. */
				vref(*vpp);
			} else if (data->vroot != *vpp) {
				/*
				 * Lost the publish race (only reachable when
				 * the MPASS above is compiled out): drop our
				 * vnode and hand back the cached winner.
				 * NOTE(review): *vpp is returned unlocked and
				 * unreferenced on this path — confirm callers
				 * tolerate that.
				 */
				FS_DEBUG("root vnode race\n");
				FUSE_UNLOCK();
				VOP_UNLOCK(*vpp, 0);
				vrele(*vpp);
				vrecycle(*vpp);
				*vpp = data->vroot;
			} else
				FUSE_UNLOCK();
		}
	}
	return err;
}
/*
 * fusefs_inactive: the last reference to the vnode has been dropped.
 * Close every file handle still open on the node, unlock the vnode,
 * and recycle it when no attributes were ever cached (va_mode == 0),
 * which is treated as a stale/uninitialized node.  Always returns 0
 * (error is never modified).
 */
int
fusefs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct proc *p = ap->a_p;
	struct fusefs_node *ip = VTOI(vp);
	struct fusefs_filehandle *fufh = NULL;
	struct fusefs_mnt *fmp;
	int error = 0;
	int type;

	DPRINTF("fusefs_inactive\n");

	fmp = (struct fusefs_mnt *)ip->ufs_ino.i_ump;

	/* Tell the FUSE server to close each handle still open on the node. */
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(ip->fufh[type]);
		if (fufh->fh_type != FUFH_INVALID)
			fusefs_file_close(fmp, ip, fufh->fh_type, type,
			    (ip->vtype == VDIR), ap->a_p);
	}

	VOP_UNLOCK(vp, 0, p);

	/*
	 * XXX not sure if it is ok to do like that: va_mode == 0 is used
	 * here as "attributes were never fetched", so recycle the vnode.
	 */
	if (ip->cached_attrs.va_mode == 0)
		vrecycle(vp, p);

	return (error);
}
/*
 * fusefs_inactive: the last reference to the vnode has been dropped.
 * Close all open file handles on the node, then probe it with
 * VOP_GETATTR(); if the attributes can no longer be fetched the node
 * is presumed gone on the file server and the vnode is recycled.
 * Always returns 0 — the VOP_GETATTR error only drives recycling and
 * is deliberately not propagated.
 */
int
fusefs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct proc *p = curproc;
	struct ucred *cred = p->p_ucred;
	struct fusefs_node *ip = VTOI(vp);
	struct fusefs_filehandle *fufh = NULL;
	struct fusefs_mnt *fmp;
	struct vattr vattr;
	int error = 0;
	int type;

	fmp = (struct fusefs_mnt *)ip->ufs_ino.i_ump;

	/* Tell the FUSE server to close each handle still open on the node. */
	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(ip->fufh[type]);
		if (fufh->fh_type != FUFH_INVALID)
			fusefs_file_close(fmp, ip, fufh->fh_type, type,
			    (ip->vtype == VDIR), p);
	}

	/* Probe the node; failure means it is gone as far as we can tell. */
	error = VOP_GETATTR(vp, &vattr, cred);

	VOP_UNLOCK(vp, 0);

	if (error)
		vrecycle(vp, p);

	return (0);
}
/*
 * DMU buffer eviction callback for znodes.
 *
 * All dbuf references must never be dropped before the eviction
 * callback has been cleared, so this callback firing is a bug: the
 * live (#if 1) branch panics.  The disabled branch preserves the
 * original OpenSolaris recovery path — detach the dbuf and free or
 * recycle the znode/vnode pair — for reference.
 */
/*ARGSUSED*/
static void
znode_evict_error(dmu_buf_t *dbuf, void *user_ptr)
{
#if 1	/* XXXPJD: From OpenSolaris. */
	/*
	 * We should never drop all dbuf refs without first clearing
	 * the eviction callback.
	 */
	panic("evicting znode %p\n", user_ptr);
#else	/* XXXPJD */
	znode_t *zp = user_ptr;
	vnode_t *vp;

	mutex_enter(&zp->z_lock);
	zp->z_dbuf = NULL;
	vp = ZTOV(zp);
	if (vp == NULL) {
		/* No vnode attached: the znode can be freed outright. */
		mutex_exit(&zp->z_lock);
		zfs_znode_free(zp);
	} else if (vp->v_count == 0) {
		/*
		 * Unreferenced vnode: detach it from the znode, pin it
		 * with vhold() across the z_lock drop, recycle it, then
		 * free the znode.
		 */
		zp->z_vnode = NULL;
		vhold(vp);
		mutex_exit(&zp->z_lock);
		vn_lock(vp, LK_EXCLUSIVE | LK_RETRY, curthread);
		vrecycle(vp, curthread);
		VOP_UNLOCK(vp, 0, curthread);
		vdrop(vp);
		zfs_znode_free(zp);
	} else {
		/* Still referenced; leave teardown to normal inactivation. */
		mutex_exit(&zp->z_lock);
	}
#endif
}
/* * discard preallocated blocks */ int ext2_inactive(struct vop_inactive_args *ap) { struct vnode *vp = ap->a_vp; struct inode *ip = VTOI(vp); struct thread *td = ap->a_td; int mode, error = 0; /* * Ignore inodes related to stale file handles. */ if (ip->i_mode == 0) goto out; if (ip->i_nlink <= 0) { error = ext2_truncate(vp, (off_t)0, 0, NOCRED, td); ip->i_rdev = 0; mode = ip->i_mode; ip->i_mode = 0; ip->i_flag |= IN_CHANGE | IN_UPDATE; ext2_vfree(vp, ip->i_number, mode); } if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) ext2_update(vp, 0); out: /* * If we are done with the inode, reclaim it * so that it can be reused immediately. */ if (ip->i_mode == 0) vrecycle(vp); return (error); }
/*
 * msdosfs_inactive: last reference to a denode was dropped.  If the
 * file was deleted (de_refcnt <= 0) on a writable mount, truncate it
 * and mark its directory slot deleted; flush the denode; unlock the
 * vnode; and recycle it once the slot is marked SLOT_DELETED.
 */
int
msdosfs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct denode *dep = VTODE(vp);
	struct proc *p = ap->a_p;
	int error;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("msdosfs_inactive(): pushing active", vp);
#endif
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_inactive(): dep %08x, de_Name[0] %x\n", dep,
	    dep->de_Name[0]);
#endif

	error = 0;

	/*
	 * Get rid of denodes related to stale file handles.
	 */
	if (dep->de_Name[0] == SLOT_DELETED)
		goto out;

	/*
	 * If the file has been deleted and it is on a read/write
	 * filesystem, then truncate the file, and mark the directory slot
	 * as empty.  (This may not be necessary for the dos filesystem.)
	 */
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_inactive(): dep %08x, refcnt %d, mntflag %x, MNT_RDONLY %x\n",
	    dep, dep->de_refcnt, vp->v_mount->mnt_flag, MNT_RDONLY);
#endif
	if (dep->de_refcnt <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = detrunc(dep, (uint32_t)0, 0, NOCRED, NULL);
		dep->de_Name[0] = SLOT_DELETED;
	}
	/* Write back any pending denode changes. */
	deupdat(dep, 0);

out:
	VOP_UNLOCK(vp, 0, p);
	/*
	 * If we are done with the denode, reclaim it
	 * so that it can be reused immediately.
	 */
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_inactive(): v_usecount %d, de_Name[0] %x\n",
	    vp->v_usecount, dep->de_Name[0]);
#endif
	if (dep->de_Name[0] == SLOT_DELETED)
		vrecycle(vp, p);
	return (error);
}
/*
 * procfs_inactive: invoked when a pfsnode's reference count drops to
 * zero (the vnode is then on the free list and must be re-acquired
 * with vget()).  Recycle the vnode when the node's process is dead.
 */
static int
procfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct pfsnode *pfs = VTOPFS(vp);

	/* A dead backing process means the node will never be valid again. */
	if (pfs->pfs_pid & PFS_DEAD)
		vrecycle(vp);

	return 0;
}
/*
 * devfs_vop_inactive: recycle the vnode unless its devfs node still
 * exists and remains linked into the devfs tree.
 */
static int
devfs_vop_inactive(struct vop_inactive_args *ap)
{
	struct devfs_node *dnp = DEVFS_NODE(ap->a_vp);

	if (dnp == NULL || !(dnp->flags & DEVFS_NODE_LINKED))
		vrecycle(ap->a_vp);

	return (0);
}
/*
 * tmpfs_inactive (DragonFly): last reference to the vnode was dropped.
 * Under the per-mount token, recycle deleted nodes immediately —
 * truncating regular files first so their data memory is recovered
 * right away instead of being flushed by the normal reclaim path.
 */
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *node;
	struct mount *mp;

	mp = vp->v_mount;
	lwkt_gettoken(&mp->mnt_token);
	node = VP_TO_TMPFS_NODE(vp);

	/*
	 * Degenerate case: vnode with no attached tmpfs node.
	 */
	if (node == NULL) {
		vrecycle(vp);
		lwkt_reltoken(&mp->mnt_token);
		return(0);
	}

	/*
	 * Get rid of unreferenced deleted vnodes sooner rather than
	 * later so the data memory can be recovered immediately.
	 *
	 * We must truncate the vnode to prevent the normal reclamation
	 * path from flushing the data for the removed file to disk.
	 */
	TMPFS_NODE_LOCK(node);
	if ((node->tn_vpstate & TMPFS_VNODE_ALLOCATING) == 0 &&
	    node->tn_links == 0) {
		/* Mark doomed before dropping the node lock. */
		node->tn_vpstate = TMPFS_VNODE_DOOMED;
		TMPFS_NODE_UNLOCK(node);
		if (node->tn_type == VREG)
			tmpfs_truncate(vp, 0);
		vrecycle(vp);
	} else {
		TMPFS_NODE_UNLOCK(node);
	}
	lwkt_reltoken(&mp->mnt_token);

	return 0;
}
/*
 * tmpfs_inactive: last reference to the vnode was dropped.  Unlinked
 * nodes are recycled immediately; live nodes get their modification
 * time re-checked.
 */
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *tnp = VP_TO_TMPFS_NODE(vp);

	if (tnp->tn_links != 0)
		tmpfs_check_mtime(vp);
	else
		vrecycle(vp);

	return (0);
}
/*
 * null_inactive: a remove/rmdir on the underlying filesystem cannot be
 * observed from this layer, so release the lower vnode as soon as the
 * upper one goes inactive by recycling it here.
 *
 * No resources are freed and the vnode stays in the hash at this
 * point: another process may find it there during inactivation and
 * sit in vget() waiting for us to unlock it, so the real teardown is
 * deferred to VOP_RECLAIM after the appropriate VXLOCK handling.
 */
static int
null_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;

	vp->v_object = NULL;

	/* Last reference: recycle so the lower vnode is not tied up. */
	vrecycle(vp, ap->a_td);

	return (0);
}
/*
 * tmpfs_inactive: called with the vnode locked once its last
 * reference is dropped.  Unlinked nodes are recycled right away so
 * their memory can be reused.
 */
static int
tmpfs_inactive(struct vop_inactive_args *v)
{
	struct vnode *vp = v->a_vp;
	struct tmpfs_node *tnp;

	MPASS(VOP_ISLOCKED(vp));

	tnp = VP_TO_TMPFS_NODE(vp);
	if (tnp->tn_links == 0)
		vrecycle(vp);

	return (0);
}
/*
 * msdosfs_inactive: last reference to a denode was dropped.  If the
 * file was deleted (de_refcnt <= 0) on a writable mount, truncate it
 * and mark its directory slot deleted; flush the denode; and recycle
 * the vnode when the slot is deleted or empty.
 */
int
msdosfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct denode *dep = VTODE(vp);
	int error = 0;

#ifdef MSDOSFS_DEBUG
	printf("msdosfs_inactive(): dep %p, de_Name[0] %x\n", dep,
	    dep->de_Name[0]);
#endif

	/*
	 * Ignore denodes related to stale file handles.
	 */
	if (dep->de_Name[0] == SLOT_DELETED || dep->de_Name[0] == SLOT_EMPTY)
		goto out;

	/*
	 * If the file has been deleted and it is on a read/write
	 * filesystem, then truncate the file, and mark the directory slot
	 * as empty.  (This may not be necessary for the dos filesystem.)
	 */
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_inactive(): dep %p, refcnt %ld, mntflag %llx, MNT_RDONLY %llx\n",
	    dep, dep->de_refcnt, (unsigned long long)vp->v_mount->mnt_flag,
	    (unsigned long long)MNT_RDONLY);
#endif
	if (dep->de_refcnt <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = detrunc(dep, (u_long) 0, 0, NOCRED);
		dep->de_flag |= DE_UPDATE;
		dep->de_Name[0] = SLOT_DELETED;
	}
	/* Write back any pending denode changes. */
	deupdat(dep, 0);

out:
	/*
	 * If we are done with the denode, reclaim it
	 * so that it can be reused immediately.
	 */
#ifdef MSDOSFS_DEBUG
	printf("msdosfs_inactive(): v_usecount %d, de_Name[0] %x\n",
	    vrefcnt(vp), dep->de_Name[0]);
#endif
	if (dep->de_Name[0] == SLOT_DELETED || dep->de_Name[0] == SLOT_EMPTY)
		vrecycle(vp);
	return (error);
}
/*
 * Last reference to an inode.  If necessary, write or delete it.
 * For a zero-link inode on a writable mount: truncate it, stamp its
 * deletion time (dtime), and free the on-disk inode; then flush any
 * pending metadata updates, unlock, and recycle stale/deleted vnodes.
 */
int
ext2fs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct proc *p = ap->a_p;
	struct timespec ts;
	int error = 0;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("ext2fs_inactive: pushing active", vp);
#endif

	/* Get rid of inodes related to stale file handles. */
	if (ip->i_e2din == NULL || ip->i_e2fs_mode == 0 || ip->i_e2fs_dtime)
		goto out;

	error = 0;
	if (ip->i_e2fs_nlink == 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		/* Unlinked: release blocks, set dtime, free the inode. */
		if (ext2fs_size(ip) != 0) {
			error = ext2fs_truncate(ip, (off_t)0, 0, NOCRED);
		}
		getnanotime(&ts);
		ip->i_e2fs_dtime = ts.tv_sec;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ext2fs_inode_free(ip, ip->i_number, ip->i_e2fs_mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		ext2fs_update(ip, 0);
	}
out:
	VOP_UNLOCK(vp, p);
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_e2din == NULL || ip->i_e2fs_dtime != 0)
		vrecycle(vp, p);
	return (error);
}
/*
 * Last reference to an inode.  If necessary, write or delete it.
 * Zero-link inodes on a writable mount are charged back to quota,
 * truncated, and freed; pending metadata changes are flushed; the
 * vnode is recycled once the inode is gone (ip NULL or i_mode == 0).
 *
 * ufs_inactive(struct vnode *a_vp)
 */
int
ufs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	int mode, error = 0;

	if (prtactive && VREFCNT(vp) > 1)
		vprint("ufs_inactive: pushing active", vp);

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip == NULL || ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef QUOTA
		if (!ufs_getinoquota(ip))
			(void)ufs_chkiq(ip, -1, NOCRED, FORCE);
#endif
		/* Must have a VM object to truncate */
		error = ffs_truncate(vp, (off_t)0, 0, NOCRED);
		ip->i_rdev = 0;
		/* Save the mode for ffs_vfree() before zeroing it. */
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		ffs_vfree(vp, ip->i_number, mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE))
		ffs_update(vp, 0);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip == NULL || ip->i_mode == 0)
		vrecycle(vp);
	return (error);
}
/*
 * Last reference to an inode.  If necessary, write or delete it.
 * Any preallocated blocks are discarded first; zero-link inodes on a
 * writable mount are charged back to quota, truncated, and freed;
 * pending metadata changes are flushed; the vnode is recycled once
 * the inode is gone (ip NULL or i_mode == 0).
 *
 * ext2_inactive(struct vnode *a_vp)
 */
int
ext2_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	int mode, error = 0;

	ext2_discard_prealloc(ip);

	if (prtactive && vp->v_sysref.refcnt > 1)
		vprint("ext2_inactive: pushing active", vp);

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip == NULL || ip->i_mode == 0)
		goto out;
	if (ip->i_nlink <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
#ifdef QUOTA
		if (!ext2_getinoquota(ip))
			(void)ext2_chkiq(ip, -1, NOCRED, FORCE);
#endif
		error = EXT2_TRUNCATE(vp, (off_t)0, 0, NOCRED);
		ip->i_rdev = 0;
		/* Save the mode for EXT2_VFREE() before zeroing it. */
		mode = ip->i_mode;
		ip->i_mode = 0;
		ip->i_flag |= IN_CHANGE | IN_UPDATE;
		EXT2_VFREE(vp, ip->i_number, mode);
	}
	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE))
		EXT2_UPDATE(vp, 0);
out:
	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip == NULL || ip->i_mode == 0)
		vrecycle(vp);
	return (error);
}
/*
 * puffs_vnop_inactive: flush the vnode's cache and, when the file
 * server asked for inactivation notices, send it a fire-and-forget
 * PUFFS_VN_INACTIVE message.  Nodes the server already considers
 * gone (PNODE_NOREFS) are marked dying and recycled.
 */
/* XXX: callinactive can't setback */
static int
puffs_vnop_inactive(struct vop_inactive_args *ap)
{
	PUFFS_MSG_VARS(vn, inactive);
	struct vnode *vp = ap->a_vp;
	struct puffs_mount *pmp = MPTOPUFFSMP(vp->v_mount);
	struct puffs_node *pnode = VPTOPP(vp);

	flushvncache(vp, MNT_NOWAIT);

	if (doinact(pmp, pnode->pn_stat & PNODE_DOINACT)) {
		/*
		 * do not wait for reply from userspace, otherwise it may
		 * deadlock.
		 */
		PUFFS_MSG_ALLOC(vn, inactive);
		puffs_msg_setfaf(park_inactive);
		puffs_msg_setinfo(park_inactive, PUFFSOP_VN,
		    PUFFS_VN_INACTIVE, VPTOPNC(vp));
		puffs_msg_enqueue(pmp, park_inactive);
		PUFFS_MSG_RELEASE(inactive);
	}
	pnode->pn_stat &= ~PNODE_DOINACT;

	/*
	 * If the file server already thinks the node is gone, don't
	 * keep it around: this node's life was already all it would
	 * ever be.  Mark it dying and recycle the vnode.
	 */
	if (pnode->pn_stat & PNODE_NOREFS) {
		pnode->pn_stat |= PNODE_DYING;
		vrecycle(vp);
	}

	return 0;
}
/*
 * Last reference to a node.  If necessary, write or delete it.
 * Flushes a changed node (and its changed parent) back to disk; a
 * failed update aborts inactivation with the error.  Invalid nodes
 * (H_INVAL) are recycled (DragonFly) or vgone'd (NetBSD).
 *
 * hpfs_inactive(struct vnode *a_vp)
 */
int
hpfs_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct hpfsnode *hp = VTOHP(vp);
	int error;

	dprintf(("hpfs_inactive(0x%x): \n", hp->h_no));

	if (hp->h_flag & H_CHANGE) {
		dprintf(("hpfs_inactive: node changed, update\n"));
		error = hpfs_update (hp);
		if (error)
			return (error);
	}

	if (hp->h_flag & H_PARCHANGE) {
		dprintf(("hpfs_inactive: parent node changed, update\n"));
		error = hpfs_updateparent (hp);
		if (error)
			return (error);
	}

	if (prtactive && vp->v_sysref.refcnt > 1)
		vprint("hpfs_inactive: pushing active", vp);

	if (hp->h_flag & H_INVAL) {
#if defined(__DragonFly__)
		vrecycle(vp);
#else /* defined(__NetBSD__) */
		vgone(vp);
#endif
		return (0);
	}
	return (0);
}
/*
    struct vnop_inactive_args {
	struct vnode *a_vp;
	struct thread *a_td;
    };
*/
/*
 * fuse_vnop_inactive: last reference to the vnode was dropped.  Close
 * every valid file handle; before the first close of a regular file,
 * write back a changed size and either invalidate or flush its
 * buffers (invalidate when data caching is globally disabled or the
 * node was revoked).  Revoked nodes are recycled when the
 * fuse_reclaim_revoked policy is set.
 */
static int
fuse_vnop_inactive(struct vop_inactive_args *ap)
{
	struct vnode *vp = ap->a_vp;
	struct thread *td = ap->a_td;

	struct fuse_vnode_data *fvdat = VTOFUD(vp);
	struct fuse_filehandle *fufh = NULL;

	int type, need_flush = 1;

	FS_DEBUG("inode=%ju\n", (uintmax_t)VTOI(vp));

	for (type = 0; type < FUFH_MAXTYPE; type++) {
		fufh = &(fvdat->fufh[type]);
		if (FUFH_IS_VALID(fufh)) {
			/* Flush only once, before the first handle close. */
			if (need_flush && vp->v_type == VREG) {
				if ((VTOFUD(vp)->flag & FN_SIZECHANGE) != 0) {
					fuse_vnode_savesize(vp, NULL);
				}
				if (fuse_data_cache_invalidate ||
				    (fvdat->flag & FN_REVOKED) != 0)
					fuse_io_invalbuf(vp, td);
				else
					fuse_io_flushbuf(vp, MNT_WAIT, td);
				need_flush = 0;
			}
			fuse_filehandle_close(vp, type, td, NULL);
		}
	}

	if ((fvdat->flag & FN_REVOKED) != 0 && fuse_reclaim_revoked) {
		vrecycle(vp);
	}
	return 0;
}
/*
 * lfs_bmapv: for each entry in blkiov[] resolve the block's current
 * on-disk address into bi_daddr (and its size into bi_size), so the
 * cleaner can tell which blocks in a segment are still live.  Entries
 * with bi_lbn == LFS_UNUSED_LBN ask for the inode's own disk address.
 *
 * Consecutive entries for the same inode reuse one referenced vnode;
 * when moving on to a new inode the old vnode is unreferenced and,
 * if it was instantiated by us for BMAP purposes (LFSI_BMAP), an
 * attempt is made to recycle it so cleaning does not pin vnodes.
 */
int
lfs_bmapv(struct proc *p, fsid_t *fsidp, BLOCK_INFO *blkiov, int blkcnt)
{
	BLOCK_INFO *blkp;
	IFILE *ifp;
	struct buf *bp;
	struct inode *ip = NULL;
	struct lfs *fs;
	struct mount *mntp;
	struct ulfsmount *ump;
	struct vnode *vp;
	ino_t lastino;
	daddr_t v_daddr;
	int cnt, error;
	int numrefed = 0;	/* count of vnode refs we currently hold */

	lfs_cleaner_pid = p->p_pid;

	if ((mntp = vfs_getvfs(fsidp)) == NULL)
		return (ENOENT);

	ump = VFSTOULFS(mntp);
	/* Hold the mount busy for the whole scan. */
	if ((error = vfs_busy(mntp, NULL)) != 0)
		return (error);

	cnt = blkcnt;

	fs = VFSTOULFS(mntp)->um_lfs;

	error = 0;

	/* these were inside the initialization for the for loop */
	v_daddr = LFS_UNUSED_DADDR;
	lastino = LFS_UNUSED_INUM;
	for (blkp = blkiov; cnt--; ++blkp)
	{
		/*
		 * Get the IFILE entry (only once) and see if the file still
		 * exists.
		 */
		if (lastino != blkp->bi_inode) {
			/*
			 * Finish the old file, if there was one.  The presence
			 * of a usable vnode in vp is signaled by a valid
			 * v_daddr.
			 */
			if (v_daddr != LFS_UNUSED_DADDR) {
				lfs_vunref(vp);
				/*
				 * If we instantiated this vnode just for
				 * BMAP, try to recycle it immediately.
				 */
				if (VTOI(vp)->i_lfs_iflags & LFSI_BMAP) {
					mutex_enter(vp->v_interlock);
					if (vget(vp, LK_NOWAIT) == 0) {
						if (! vrecycle(vp))
							vrele(vp);
					}
				}
				numrefed--;
			}

			/*
			 * Start a new file
			 */
			lastino = blkp->bi_inode;
			if (blkp->bi_inode == LFS_IFILE_INUM)
				v_daddr = fs->lfs_idaddr;
			else {
				LFS_IENTRY(ifp, fs, blkp->bi_inode, bp);
				v_daddr = ifp->if_daddr;
				brelse(bp, 0);
			}
			if (v_daddr == LFS_UNUSED_DADDR) {
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			/*
			 * A regular call to VFS_VGET could deadlock
			 * here.  Instead, we try an unlocked access.
			 */
			mutex_enter(&ulfs_ihash_lock);
			vp = ulfs_ihashlookup(ump->um_dev, blkp->bi_inode);
			if (vp != NULL && !(vp->v_iflag & VI_XLOCK)) {
				ip = VTOI(vp);
				mutex_enter(vp->v_interlock);
				mutex_exit(&ulfs_ihash_lock);
				if (lfs_vref(vp)) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				numrefed++;
			} else {
				mutex_exit(&ulfs_ihash_lock);
				/*
				 * Don't VFS_VGET if we're being unmounted,
				 * since we hold vfs_busy().
				 */
				if (mntp->mnt_iflag & IMNT_UNMOUNT) {
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				}
				error = VFS_VGET(mntp, blkp->bi_inode, &vp);
				if (error) {
					DLOG((DLOG_CLEAN, "lfs_bmapv: vget ino"
					      "%d failed with %d",
					      blkp->bi_inode,error));
					v_daddr = LFS_UNUSED_DADDR;
					continue;
				} else {
					KASSERT(VOP_ISLOCKED(vp));
					/* Remember we created it for BMAP. */
					VTOI(vp)->i_lfs_iflags |= LFSI_BMAP;
					VOP_UNLOCK(vp);
					numrefed++;
				}
			}
			ip = VTOI(vp);
		} else if (v_daddr == LFS_UNUSED_DADDR) {
			/*
			 * This can only happen if the vnode is dead.
			 * Keep going.	Note that we DO NOT set the
			 * bi_addr to anything -- if we failed to get
			 * the vnode, for example, we want to assume
			 * conservatively that all of its blocks *are*
			 * located in the segment in question.
			 * lfs_markv will throw them out if we are
			 * wrong.
			 */
			/* blkp->bi_daddr = LFS_UNUSED_DADDR; */
			continue;
		}

		/* Past this point we are guaranteed that vp, ip are valid. */

		if (blkp->bi_lbn == LFS_UNUSED_LBN) {
			/*
			 * We just want the inode address, which is
			 * conveniently in v_daddr.
			 */
			blkp->bi_daddr = v_daddr;
		} else {
			daddr_t bi_daddr;

			/* XXX ondisk32 */
			error = VOP_BMAP(vp, blkp->bi_lbn, NULL, &bi_daddr,
					 NULL);
			if (error)
			{
				blkp->bi_daddr = LFS_UNUSED_DADDR;
				continue;
			}
			blkp->bi_daddr = LFS_DBTOFSB(fs, bi_daddr);
			/* Fill in the block size, too */
			if (blkp->bi_lbn >= 0)
				blkp->bi_size = lfs_blksize(fs, ip,
				    blkp->bi_lbn);
			else
				blkp->bi_size = fs->lfs_bsize;
		}
	}

	/*
	 * Finish the old file, if there was one.  The presence
	 * of a usable vnode in vp is signaled by a valid v_daddr.
	 */
	if (v_daddr != LFS_UNUSED_DADDR) {
		lfs_vunref(vp);
		/* Recycle as above. */
		if (ip->i_lfs_iflags & LFSI_BMAP) {
			mutex_enter(vp->v_interlock);
			if (vget(vp, LK_NOWAIT) == 0) {
				if (! vrecycle(vp))
					vrele(vp);
			}
		}
		numrefed--;
	}

#ifdef DIAGNOSTIC
	if (numrefed != 0)
		panic("lfs_bmapv: numrefed=%d", numrefed);
#endif

	vfs_unbusy(mntp, false, NULL);

	return 0;
}
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix).  The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ext2fs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
{
	struct vnode *vp, *mvp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *newfs;
	int i, error;
	void *cp;
	struct ufsmount *ump;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);

	ump = VFSTOUFS(mp);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = ump->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		panic("ext2fs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	error = bread(devvp, SBLOCK, SBSIZE, NOCRED, 0, &bp);
	if (error) {
		return (error);
	}
	newfs = (struct ext2fs *)bp->b_data;
	error = ext2fs_checksb(newfs, (mp->mnt_flag & MNT_RDONLY) != 0);
	if (error) {
		brelse(bp, 0);
		return (error);
	}

	fs = ump->um_e2fs;
	/*
	 * copy in new superblock, and compute in-memory values
	 */
	e2fs_sbload(newfs, &fs->e2fs);
	fs->e2fs_ncg =
	    howmany(fs->e2fs.e2fs_bcount - fs->e2fs.e2fs_first_dblock,
	    fs->e2fs.e2fs_bpg);
	fs->e2fs_fsbtodb = fs->e2fs.e2fs_log_bsize + LOG_MINBSIZE - DEV_BSHIFT;
	fs->e2fs_bsize = MINBSIZE << fs->e2fs.e2fs_log_bsize;
	fs->e2fs_bshift = LOG_MINBSIZE + fs->e2fs.e2fs_log_bsize;
	fs->e2fs_qbmask = fs->e2fs_bsize - 1;
	fs->e2fs_bmask = ~fs->e2fs_qbmask;
	fs->e2fs_ngdb =
	    howmany(fs->e2fs_ncg, fs->e2fs_bsize / sizeof(struct ext2_gd));
	fs->e2fs_ipb = fs->e2fs_bsize / EXT2_DINODE_SIZE(fs);
	fs->e2fs_itpg = fs->e2fs.e2fs_ipg / fs->e2fs_ipb;
	brelse(bp, 0);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	for (i = 0; i < fs->e2fs_ngdb; i++) {
		error = bread(devvp ,
		    EXT2_FSBTODB(fs, fs->e2fs.e2fs_first_dblock +
		    1 /* superblock */ + i),
		    fs->e2fs_bsize, NOCRED, 0, &bp);
		if (error) {
			return (error);
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &fs->e2fs_gd[i * fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    fs->e2fs_bsize);
		brelse(bp, 0);
	}

	/* Allocate a marker vnode. */
	mvp = vnalloc(mp);
	/*
	 * NOTE: not using the TAILQ_FOREACH here since in this loop vgone()
	 * and vclean() can be called indirectly
	 */
	mutex_enter(&mntvnode_lock);
loop:
	for (vp = TAILQ_FIRST(&mp->mnt_vnodelist); vp; vp = vunmark(mvp)) {
		vmark(mvp, vp);
		if (vp->v_mount != mp || vismarker(vp))
			continue;
		/*
		 * Step 4: invalidate all inactive vnodes.
		 * NOTE(review): a nonzero vrecycle() return appears to mean
		 * the vnode was recycled and mntvnode_lock was dropped, so
		 * the scan restarts — confirm against vrecycle(9).
		 */
		if (vrecycle(vp, &mntvnode_lock)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto loop;
		}
		/*
		 * Step 5: invalidate all cached file data.
		 */
		mutex_enter(vp->v_interlock);
		mutex_exit(&mntvnode_lock);
		if (vget(vp, LK_EXCLUSIVE)) {
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			goto loop;
		}
		if (vinvalbuf(vp, 0, cred, l, 0, 0))
			panic("ext2fs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp,
		    EXT2_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			mutex_enter(&mntvnode_lock);
			(void)vunmark(mvp);
			break;
		}
		cp = (char *)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * EXT2_DINODE_SIZE(fs));
		e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
		ext2fs_set_inode_guid(ip);
		brelse(bp, 0);
		vput(vp);
		mutex_enter(&mntvnode_lock);
	}
	mutex_exit(&mntvnode_lock);
	vnfree(mvp);
	return (error);
}
/*
 * Last reference to an inode.  If necessary, write or delete it.
 * Zero-link inodes on a writable mount are charged back to quota,
 * truncated, and freed (with softdep bookkeeping); pending metadata
 * changes are flushed; the vnode is unlocked and recycled once the
 * inode is gone (no dinode or mode == 0).
 */
int
ufs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct proc *p = ap->a_p;
	mode_t mode;
	int error = 0;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("ffs_inactive: pushing active", vp);
#endif

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_din1 == NULL || DIP(ip, mode) == 0)
		goto out;

	if (DIP(ip, nlink) <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		if (getinoquota(ip) == 0)
			(void)ufs_quota_free_inode(ip, NOCRED);

		error = UFS_TRUNCATE(ip, (off_t)0, IO_EXT | IO_NORMAL, NOCRED);

		DIP_ASSIGN(ip, rdev, 0);
		/* Save the mode for UFS_INODE_FREE() before zeroing it. */
		mode = DIP(ip, mode);
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;

		/*
		 * Setting the mode to zero needs to wait for the inode to be
		 * written just as does a change to the link count.  So, rather
		 * than creating a new entry point to do the same thing, we
		 * just use softdep_change_linkcnt().  Also, we can't let
		 * softdep co-opt us to help on its worklist, as we may end up
		 * trying to recycle vnodes and getting to this same point a
		 * couple of times, blowing the kernel stack.  However, this
		 * could be optimized by checking if we are coming from
		 * vrele(), vput() or vclean() (by checking for VXLOCK) and
		 * just avoiding the co-opt to happen in the last case.
		 */
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip, 1);

		UFS_INODE_FREE(ip, ip->i_number, mode);
	}

	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		UFS_UPDATE(ip, 0);
	}
out:
	VOP_UNLOCK(vp, 0, p);

	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (ip->i_din1 == NULL || DIP(ip, mode) == 0)
		vrecycle(vp, p);

	return (error);
}
/*
 * Last reference to an inode.  If necessary, write or delete it.
 * WAPBL-aware variant: deletion work runs inside a journal
 * transaction, and on a journaled mount the truncation proceeds one
 * indirect block at a time, ending and restarting the transaction
 * between steps to bound the amount of journal space held.  The
 * vnode is recycled only when everything succeeded and the inode is
 * gone.
 */
int
ufs_inactive(void *v)
{
	struct vop_inactive_args *ap = v;
	struct vnode *vp = ap->a_vp;
	struct inode *ip = VTOI(vp);
	struct fs *fs = ip->i_fs;
	struct proc *p = curproc;
	mode_t mode;
	int error = 0, logged = 0, truncate_error = 0;
#ifdef DIAGNOSTIC
	extern int prtactive;

	if (prtactive && vp->v_usecount != 0)
		vprint("ufs_inactive: pushing active", vp);
#endif

	UFS_WAPBL_JUNLOCK_ASSERT(vp->v_mount);

	/*
	 * Ignore inodes related to stale file handles.
	 */
	if (ip->i_din1 == NULL || DIP(ip, mode) == 0)
		goto out;

	if (DIP(ip, nlink) <= 0 && (vp->v_mount->mnt_flag & MNT_RDONLY) == 0) {
		error = UFS_WAPBL_BEGIN(vp->v_mount);
		if (error)
			goto out;
		logged = 1;
		if (getinoquota(ip) == 0)
			(void)ufs_quota_free_inode(ip, NOCRED);

		if (DIP(ip, size) != 0 && vp->v_mount->mnt_wapbl) {
			/*
			 * When journaling, only truncate one indirect block at
			 * a time.
			 */
			uint64_t incr = MNINDIR(ip->i_ump) << fs->fs_bshift;
			uint64_t base = NDADDR << fs->fs_bshift;
			while (!error && DIP(ip, size) > base + incr) {
				/*
				 * round down to next full indirect block
				 * boundary.
				 */
				uint64_t nsize = base +
				    ((DIP(ip, size) - base - 1) & ~(incr - 1));
				error = UFS_TRUNCATE(ip, nsize, 0, NOCRED);
				if (error)
					break;
				/* Commit this step and start a new one. */
				UFS_WAPBL_END(vp->v_mount);
				error = UFS_WAPBL_BEGIN(vp->v_mount);
				if (error)
					goto out;
			}
		}

		if (error == 0) {
			truncate_error = UFS_TRUNCATE(ip, (off_t)0, 0, NOCRED);
			/* XXX pedro: remove me */
			if (truncate_error)
				printf("UFS_TRUNCATE()=%d\n", truncate_error);
		}

		DIP_ASSIGN(ip, rdev, 0);
		/* Save the mode for UFS_INODE_FREE() before zeroing it. */
		mode = DIP(ip, mode);
		DIP_ASSIGN(ip, mode, 0);
		ip->i_flag |= IN_CHANGE | IN_UPDATE;

		/*
		 * Setting the mode to zero needs to wait for the inode to be
		 * written just as does a change to the link count.  So, rather
		 * than creating a new entry point to do the same thing, we
		 * just use softdep_change_linkcnt().  Also, we can't let
		 * softdep co-opt us to help on its worklist, as we may end up
		 * trying to recycle vnodes and getting to this same point a
		 * couple of times, blowing the kernel stack.  However, this
		 * could be optimized by checking if we are coming from
		 * vrele(), vput() or vclean() (by checking for VXLOCK) and
		 * just avoiding the co-opt to happen in the last case.
		 */
		if (DOINGSOFTDEP(vp))
			softdep_change_linkcnt(ip, 1);

		UFS_INODE_FREE(ip, ip->i_number, mode);
	}

	if (ip->i_flag & (IN_ACCESS | IN_CHANGE | IN_MODIFIED | IN_UPDATE)) {
		/* Open a transaction here if the delete path didn't. */
		if (!logged++) {
			int err;
			err = UFS_WAPBL_BEGIN(vp->v_mount);
			if (err) {
				error = err;
				goto out;
			}
		}
		UFS_UPDATE(ip, 0);
	}
	if (logged)
		UFS_WAPBL_END(vp->v_mount);
out:
	VOP_UNLOCK(vp, 0);

	/*
	 * If we are done with the inode, reclaim it
	 * so that it can be reused immediately.
	 */
	if (error == 0 && truncate_error == 0 &&
	    (ip->i_din1 == NULL || DIP(ip, mode) == 0))
		vrecycle(vp, p);

	return (truncate_error ? truncate_error : error);
}
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix).  The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ext2fs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
{
	struct vnode *vp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *newfs;
	int i, error;
	void *cp;
	struct ufsmount *ump;
	struct vnode_iterator *marker;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return (EINVAL);

	ump = VFSTOUFS(mp);
	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = ump->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		panic("ext2fs_reload: dirty1");
	/*
	 * Step 2: re-read superblock from disk.
	 */
	error = bread(devvp, SBLOCK, SBSIZE, NOCRED, 0, &bp);
	if (error) {
		return (error);
	}
	newfs = (struct ext2fs *)bp->b_data;
	error = ext2fs_checksb(newfs, (mp->mnt_flag & MNT_RDONLY) != 0);
	if (error) {
		brelse(bp, 0);
		return (error);
	}

	fs = ump->um_e2fs;
	/*
	 * copy in new superblock, and compute in-memory values
	 */
	e2fs_sbload(newfs, &fs->e2fs);
	fs->e2fs_ncg =
	    howmany(fs->e2fs.e2fs_bcount - fs->e2fs.e2fs_first_dblock,
	    fs->e2fs.e2fs_bpg);
	fs->e2fs_fsbtodb = fs->e2fs.e2fs_log_bsize + LOG_MINBSIZE - DEV_BSHIFT;
	fs->e2fs_bsize = MINBSIZE << fs->e2fs.e2fs_log_bsize;
	fs->e2fs_bshift = LOG_MINBSIZE + fs->e2fs.e2fs_log_bsize;
	fs->e2fs_qbmask = fs->e2fs_bsize - 1;
	fs->e2fs_bmask = ~fs->e2fs_qbmask;
	fs->e2fs_ngdb =
	    howmany(fs->e2fs_ncg, fs->e2fs_bsize / sizeof(struct ext2_gd));
	fs->e2fs_ipb = fs->e2fs_bsize / EXT2_DINODE_SIZE(fs);
	fs->e2fs_itpg = fs->e2fs.e2fs_ipg / fs->e2fs_ipb;
	brelse(bp, 0);

	/*
	 * Step 3: re-read summary information from disk.
	 */
	for (i = 0; i < fs->e2fs_ngdb; i++) {
		error = bread(devvp ,
		    EXT2_FSBTODB(fs, fs->e2fs.e2fs_first_dblock +
		    1 /* superblock */ + i),
		    fs->e2fs_bsize, NOCRED, 0, &bp);
		if (error) {
			return (error);
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &fs->e2fs_gd[i * fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    fs->e2fs_bsize);
		brelse(bp, 0);
	}

	/* Walk every vnode on the mount via the vnode iterator. */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
		/*
		 * Step 4: invalidate all inactive vnodes.
		 * NOTE(review): nonzero vrecycle() appears to mean the
		 * vnode was recycled (reference disposed of); otherwise
		 * we still hold the reference — confirm with vrecycle(9).
		 */
		if (vrecycle(vp))
			continue;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE)) {
			vrele(vp);
			continue;
		}
		if (vinvalbuf(vp, 0, cred, l, 0, 0))
			panic("ext2fs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp,
		    EXT2_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->e2fs_bsize, NOCRED, 0, &bp);
		if (error) {
			vput(vp);
			break;
		}
		cp = (char *)bp->b_data +
		    (ino_to_fsbo(fs, ip->i_number) * EXT2_DINODE_SIZE(fs));
		e2fs_iload((struct ext2fs_dinode *)cp, ip->i_din.e2fs_din);
		ext2fs_set_inode_guid(ip);
		brelse(bp, 0);
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return (error);
}
/*
 * Reload all incore data for a filesystem (used after running fsck on
 * the root filesystem and finding things to fix).  The filesystem must
 * be mounted read-only.
 *
 * Things to do to update the mount:
 *	1) invalidate all cached meta-data.
 *	2) re-read superblock from disk.
 *	3) re-read summary information from disk.
 *	4) invalidate all inactive vnodes.
 *	5) invalidate all cached file data.
 *	6) re-read inode data for all active vnodes.
 */
int
ext2fs_reload(struct mount *mp, kauth_cred_t cred, struct lwp *l)
{
	struct vnode *vp, *devvp;
	struct inode *ip;
	struct buf *bp;
	struct m_ext2fs *fs;
	struct ext2fs *newfs;
	int i, error;
	struct ufsmount *ump;
	struct vnode_iterator *marker;

	if ((mp->mnt_flag & MNT_RDONLY) == 0)
		return EINVAL;

	ump = VFSTOUFS(mp);

	/*
	 * Step 1: invalidate all cached meta-data.
	 */
	devvp = ump->um_devvp;
	vn_lock(devvp, LK_EXCLUSIVE | LK_RETRY);
	error = vinvalbuf(devvp, 0, cred, l, 0, 0);
	VOP_UNLOCK(devvp);
	if (error)
		panic("ext2fs_reload: dirty1");

	fs = ump->um_e2fs;
	/*
	 * Step 2: re-read superblock from disk.  Copy in new superblock, and
	 * compute in-memory values.
	 */
	error = bread(devvp, SBLOCK, SBSIZE, 0, &bp);
	if (error)
		return error;
	newfs = (struct ext2fs *)bp->b_data;
	e2fs_sbload(newfs, &fs->e2fs);
	brelse(bp, 0);
	/* ext2fs_sbfill() validates and derives the in-memory fields. */
	error = ext2fs_sbfill(fs, (mp->mnt_flag & MNT_RDONLY) != 0);
	if (error)
		return error;

	/*
	 * Step 3: re-read summary information from disk.
	 */
	for (i = 0; i < fs->e2fs_ngdb; i++) {
		error = bread(devvp ,
		    EXT2_FSBTODB(fs, fs->e2fs.e2fs_first_dblock +
		    1 /* superblock */ + i),
		    fs->e2fs_bsize, 0, &bp);
		if (error) {
			return error;
		}
		e2fs_cgload((struct ext2_gd *)bp->b_data,
		    &fs->e2fs_gd[i * fs->e2fs_bsize / sizeof(struct ext2_gd)],
		    fs->e2fs_bsize);
		brelse(bp, 0);
	}

	/* Walk every vnode on the mount via the vnode iterator. */
	vfs_vnode_iterator_init(mp, &marker);
	while ((vp = vfs_vnode_iterator_next(marker, NULL, NULL))) {
		/*
		 * Step 4: invalidate all inactive vnodes.
		 * NOTE(review): nonzero vrecycle() appears to mean the
		 * vnode was recycled (reference disposed of); otherwise
		 * we still hold the reference — confirm with vrecycle(9).
		 */
		if (vrecycle(vp))
			continue;
		/*
		 * Step 5: invalidate all cached file data.
		 */
		if (vn_lock(vp, LK_EXCLUSIVE)) {
			vrele(vp);
			continue;
		}
		if (vinvalbuf(vp, 0, cred, l, 0, 0))
			panic("ext2fs_reload: dirty2");
		/*
		 * Step 6: re-read inode data for all active vnodes.
		 */
		ip = VTOI(vp);
		error = bread(devvp,
		    EXT2_FSBTODB(fs, ino_to_fsba(fs, ip->i_number)),
		    (int)fs->e2fs_bsize, 0, &bp);
		if (error) {
			vput(vp);
			break;
		}
		error = ext2fs_loadvnode_content(fs, ip->i_number, bp, ip);
		brelse(bp, 0);
		if (error) {
			vput(vp);
			break;
		}
		vput(vp);
	}
	vfs_vnode_iterator_destroy(marker);
	return error;
}