/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, but locked, wait for it.
 *
 * This subroutine may block.
 */
struct vnode *
ufs_ihashget(struct ufsmount *ump, cdev_t dev, ino_t inum)
{
	struct inode *ip;
	struct vnode *vp;

loop:
	for (ip = *INOHASH(ump, inum); ip; ip = ip->i_next) {
		if (inum != ip->i_number || dev != ip->i_dev)
			continue;
		vp = ITOV(ip);
		if (vget(vp, LK_EXCLUSIVE))
			goto loop;
		/*
		 * We must check to see if the inode has been ripped
		 * out from under us after blocking.
		 */
		for (ip = *INOHASH(ump, inum); ip; ip = ip->i_next) {
			if (inum == ip->i_number && dev == ip->i_dev)
				break;
		}
		if (ip == NULL || ITOV(ip) != vp) {
			vput(vp);
			goto loop;
		}
		return (vp);
	}
	return (NULL);
}
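/*
 * Aside (not from the original source): the "*INOHASH(ump, inum)"
 * dereference above, and the ext2 variants' "*INOHASH(dev, inum)"
 * below, only work if INOHASH yields the *address* of a hash-chain
 * head rather than an index. A minimal sketch of the kind of macro
 * this code assumes; the um_ihashtbl/um_ihash field names are
 * illustrative assumptions, not confirmed definitions:
 */
#define INOHASH(ump, inum) \
	(&(ump)->um_ihashtbl[(inum) & (ump)->um_ihash])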
/*
 * Allocate and initialize a directory inode entry.
 * If requested, save its pertinent mode, owner, and time info.
 */
static struct inotab *
allocinotab(FILE *mf, struct context *ctxp, long seekpt)
{
	struct inotab *itp;
	struct modeinfo node;

	itp = calloc(1, sizeof(struct inotab));
	if (itp == NULL)
		panic("no memory for directory table\n");
	itp->t_next = inotab[INOHASH(ctxp->ino)];
	inotab[INOHASH(ctxp->ino)] = itp;
	itp->t_ino = ctxp->ino;
	itp->t_seekpt = seekpt;
	if (mf == NULL)
		return (itp);
	node.ino = ctxp->ino;
	node.mtimep[0].tv_sec = ctxp->atime_sec;
	node.mtimep[0].tv_usec = ctxp->atime_nsec / 1000;
	node.mtimep[1].tv_sec = ctxp->mtime_sec;
	node.mtimep[1].tv_usec = ctxp->mtime_nsec / 1000;
	node.ctimep[0].tv_sec = ctxp->atime_sec;
	node.ctimep[0].tv_usec = ctxp->atime_nsec / 1000;
	node.ctimep[1].tv_sec = ctxp->birthtime_sec;
	node.ctimep[1].tv_usec = ctxp->birthtime_nsec / 1000;
	node.mode = ctxp->mode;
	node.flags = ctxp->file_flags;
	node.uid = ctxp->uid;
	node.gid = ctxp->gid;
	(void)fwrite((char *)&node, 1, sizeof(struct modeinfo), mf);
	return (itp);
}
/*
 * Allocate and initialize a directory inode entry.
 * If requested, save its pertinent mode, owner, and time info.
 */
static struct inotab *
allocinotab(ufs1_ino_t ino, struct ufs1_dinode *dip, long seekpt)
{
	struct inotab *itp;
	struct modeinfo node;

	itp = calloc(1, sizeof(struct inotab));
	if (itp == NULL)
		panic("no memory for directory table\n");
	itp->t_next = inotab[INOHASH(ino)];
	inotab[INOHASH(ino)] = itp;
	itp->t_ino = ino;
	itp->t_seekpt = seekpt;
	/* mf is the file-scope mode file; NULL when mode info is not saved */
	if (mf == NULL)
		return (itp);
	node.ino = ino;
	node.timep[0].tv_sec = dip->di_atime;
	node.timep[0].tv_usec = dip->di_atimensec / 1000;
	node.timep[1].tv_sec = dip->di_mtime;
	node.timep[1].tv_usec = dip->di_mtimensec / 1000;
	node.mode = dip->di_mode;
	node.flags = dip->di_flags;
	node.uid = dip->di_uid;
	node.gid = dip->di_gid;
	fwrite((char *)&node, 1, sizeof(struct modeinfo), mf);
	return (itp);
}
void
chfs_ihashreinit(void)
{
	struct chfs_inode *ip;
	struct ihashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	dbg("reiniting\n");

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&chfs_ihash_lock);
	oldhash = chfs_ihashtbl;
	oldmask = chfs_ihash;
	chfs_ihashtbl = hash;
	chfs_ihash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((ip = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(ip, hash_entry);
			val = INOHASH(ip->dev, ip->ino);
			LIST_INSERT_HEAD(&hash[val], ip, hash_entry);
		}
	}
	mutex_exit(&chfs_ihash_lock);
	hashdone(oldhash, HASH_LIST, oldmask);
}
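/*
 * Aside (not from the original source): the NetBSD-style routines here
 * (chfs, efs, ulfs, and the masked ufs lookup) use a two-argument
 * INOHASH that produces a table *index*, masked by the size value that
 * hashinit() returned. A plausible sketch for the chfs case; the exact
 * definition is an assumption, though chfs_ihash is the mask global
 * used above:
 */
#define INOHASH(dev, inum)	(((dev) + (inum)) & chfs_ihash)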
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, but locked, wait for it.
 */
struct vnode *
efs_ihashget(dev_t dev, ino_t inum, int flags)
{
	struct ihashhead *ipp;
	struct efs_inode *eip;
	struct vnode *vp;

loop:
	mutex_enter(&efs_ihash_lock);
	ipp = &ihashtbl[INOHASH(dev, inum)];
	LIST_FOREACH(eip, ipp, ei_hash) {
		if (inum == eip->ei_number && dev == eip->ei_dev) {
			vp = EFS_ITOV(eip);
			if (flags == 0) {
				mutex_exit(&efs_ihash_lock);
			} else {
				mutex_enter(&vp->v_interlock);
				mutex_exit(&efs_ihash_lock);
				if (vget(vp, flags | LK_INTERLOCK))
					goto loop;
			}
			return (vp);
		}
	}
	mutex_exit(&efs_ihash_lock);
	return (NULL);
}
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, but locked, wait for it.
 */
struct vnode *
ulfs_ihashget(dev_t dev, ino_t inum, int flags)
{
	struct ihashhead *ipp;
	struct inode *ip;
	struct vnode *vp;

loop:
	mutex_enter(&ulfs_ihash_lock);
	ipp = &ihashtbl[INOHASH(dev, inum)];
	LIST_FOREACH(ip, ipp, i_hash) {
		if (inum == ip->i_number && dev == ip->i_dev) {
			vp = ITOV(ip);
			if (flags == 0) {
				mutex_exit(&ulfs_ihash_lock);
			} else {
				mutex_enter(vp->v_interlock);
				mutex_exit(&ulfs_ihash_lock);
				if (vget(vp, flags))
					goto loop;
			}
			return (vp);
		}
	}
	mutex_exit(&ulfs_ihash_lock);
	return (NULL);
}
/*
 * Look up an inode in the table of directories.
 */
static struct inotab *
inotablookup(ino_t ino)
{
	struct inotab *itp;

	for (itp = inotab[INOHASH(ino)]; itp != NULL; itp = itp->t_next)
		if (itp->t_ino == ino)
			return (itp);
	return (NULL);
}
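/*
 * Aside (not from the original source): the restore(8)-style routines
 * (allocinotab, inotablookup) index a fixed-size array of chain heads
 * directly, so their one-argument INOHASH is a plain modulus. A
 * plausible sketch; the table size shown is an illustrative assumption:
 */
#define HASHSIZE	1000
#define INOHASH(val)	((val) % HASHSIZE)

static struct inotab *inotab[HASHSIZE];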
/*
 * Unhook an attribute directory from a parent file/dir.
 * Only do so if we are the only user of the vnode.
 */
void
ufs_unhook_shadow(struct inode *ip, struct inode *sip)
{
	struct vnode *datavp = ITOV(ip);
	struct vnode *dirvp = ITOV(sip);
	int hno;
	kmutex_t *ihm;

	ASSERT(RW_WRITE_HELD(&sip->i_contents));
	ASSERT(RW_WRITE_HELD(&ip->i_contents));

	if (vn_is_readonly(ITOV(ip)))
		return;

	if (ip->i_ufsvfs == NULL || sip->i_ufsvfs == NULL)
		return;

	hno = INOHASH(ip->i_number);
	ihm = &ih_lock[hno];
	mutex_enter(ihm);
	mutex_enter(&datavp->v_lock);
	mutex_enter(&dirvp->v_lock);

	if (dirvp->v_count != 1 && datavp->v_count != 1) {
		mutex_exit(&dirvp->v_lock);
		mutex_exit(&datavp->v_lock);
		mutex_exit(ihm);
		return;
	}

	/*
	 * Delete shadow from ip.
	 */
	sip->i_nlink -= 2;
	ufs_setreclaim(sip);
	TRANS_INODE(sip->i_ufsvfs, sip);
	sip->i_flag |= ICHG;
	sip->i_seq++;
	ITIMES_NOLOCK(sip);

	/*
	 * Update src file.
	 */
	ip->i_oeftflag = 0;
	TRANS_INODE(ip->i_ufsvfs, ip);
	ip->i_flag |= ICHG;
	ip->i_seq++;
	ufs_iupdat(ip, 1);

	mutex_exit(&dirvp->v_lock);
	mutex_exit(&datavp->v_lock);
	mutex_exit(ihm);
}
/*
 * Check to see if an inode is in the hash table. This is used to interlock
 * file free operations to ensure that the vnode is not reused due to a
 * reallocation of its inode number before we have had a chance to recycle it.
 */
int
ufs_ihashcheck(struct ufsmount *ump, cdev_t dev, ino_t inum)
{
	struct inode *ip;

	for (ip = *INOHASH(ump, inum); ip; ip = ip->i_next) {
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	}
	return(ip ? 1 : 0);
}
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, return it, even if it is locked.
 */
struct vnode *
ufs_ihashlookup(struct ufsmount *ump, cdev_t dev, ino_t inum)
{
	struct inode *ip = NULL;

	for (ip = *INOHASH(ump, inum); ip; ip = ip->i_next) {
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	}
	if (ip)
		return (ITOV(ip));
	return (NULLVP);
}
/*
 * Check to see if an inode is in the hash table. This is used to interlock
 * file free operations to ensure that the vnode is not reused due to a
 * reallocation of its inode number before we have had a chance to recycle it.
 */
int
ext2_ihashcheck(cdev_t dev, ino_t inum)
{
	struct inode *ip;

	lwkt_gettoken(&ext2_ihash_token);
	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	}
	lwkt_reltoken(&ext2_ihash_token);
	return(ip ? 1 : 0);
}
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, return it, even if it is locked.
 */
struct vnode *
ext2_ihashlookup(cdev_t dev, ino_t inum)
{
	struct inode *ip;

	lwkt_gettoken(&ext2_ihash_token);
	for (ip = *INOHASH(dev, inum); ip; ip = ip->i_next) {
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	}
	lwkt_reltoken(&ext2_ihash_token);
	if (ip)
		return (ITOV(ip));
	return (NULLVP);
}
/*
 * Insert the inode into the hash table, and return it locked.
 */
void
efs_ihashins(struct efs_inode *eip)
{
	struct ihashhead *ipp;

	KASSERT(mutex_owned(&efs_hashlock));

	/* lock the inode, then put it on the appropriate hash list */
	vlockmgr(&eip->ei_vp->v_lock, LK_EXCLUSIVE);

	mutex_enter(&efs_ihash_lock);
	ipp = &ihashtbl[INOHASH(eip->ei_dev, eip->ei_number)];
	LIST_INSERT_HEAD(ipp, eip, ei_hash);
	mutex_exit(&efs_ihash_lock);
}
/*
 * Insert the inode into the hash table, and return it locked.
 */
void
ulfs_ihashins(struct inode *ip)
{
	struct ihashhead *ipp;
	int error __diagused;

	KASSERT(mutex_owned(&ulfs_hashlock));

	/* lock the inode, then put it on the appropriate hash list */
	error = VOP_LOCK(ITOV(ip), LK_EXCLUSIVE);
	KASSERT(error == 0);

	mutex_enter(&ulfs_ihash_lock);
	ipp = &ihashtbl[INOHASH(ip->i_dev, ip->i_number)];
	LIST_INSERT_HEAD(ipp, ip, i_hash);
	mutex_exit(&ulfs_ihash_lock);
}
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, return it, even if it is locked.
 */
struct vnode *
ufs_ihashlookup(dev_t dev, ino_t inum)
{
	struct inode *ip;
	struct ihashhead *ipp;

	KASSERT(mutex_owned(&ufs_ihash_lock));

	ipp = &ihashtbl[INOHASH(dev, inum)];
	LIST_FOREACH(ip, ipp, i_hash) {
		if (inum == ip->i_number && dev == ip->i_dev)
			break;
	}
	if (ip)
		return (ITOV(ip));
	return (NULLVP);
}
/*
 * Remove the inode from the hash table.
 */
void
ufs_ihashrem(struct ufsmount *ump, struct inode *ip)
{
	struct inode **ipp;
	struct inode *iq;

	if (ip->i_flag & IN_HASHED) {
		ipp = INOHASH(ump, ip->i_number);
		while ((iq = *ipp) != NULL) {
			if (ip == iq)
				break;
			ipp = &iq->i_next;
		}
		KKASSERT(ip == iq);
		*ipp = ip->i_next;
		ip->i_next = NULL;
		ip->i_flag &= ~IN_HASHED;
	}
}
/*
 * Insert the inode into the hash table, and return it locked.
 */
int
ufs_ihashins(struct ufsmount *ump, struct inode *ip)
{
	struct inode **ipp;
	struct inode *iq;

	KKASSERT((ip->i_flag & IN_HASHED) == 0);
	ipp = INOHASH(ump, ip->i_number);
	while ((iq = *ipp) != NULL) {
		if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number)
			return(EBUSY);
		ipp = &iq->i_next;
	}
	ip->i_next = NULL;
	*ipp = ip;
	ip->i_flag |= IN_HASHED;
	return(0);
}
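/*
 * Aside (not from the original source): a hypothetical caller sketch
 * showing why ufs_ihashins() reports EBUSY. Two threads can race to
 * instantiate the same inode; the loser discards its freshly built
 * copy and retries the lookup, which now finds the winner's vnode.
 * The surrounding error handling is an assumption, not the actual
 * caller:
 */
again:
	vp = ufs_ihashget(ump, dev, ino);
	if (vp != NULL)
		return (vp);		/* already incore; use it */
	/* allocate and initialize a fresh inode/vnode pair here */
	if (ufs_ihashins(ump, ip) == EBUSY) {
		vput(ITOV(ip));		/* lost the race; toss our copy */
		goto again;
	}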
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, but locked, wait for it.
 */
struct vnode *
chfs_ihashget(dev_t dev, ino_t inum, int flags)
{
	struct ihashhead *ipp;
	struct chfs_inode *ip;
	struct vnode *vp;

	dbg("search for ino\n");
loop:
	mutex_enter(&chfs_ihash_lock);
	ipp = &chfs_ihashtbl[INOHASH(dev, inum)];
	dbg("ipp: %p, chfs_ihashtbl: %p, ihash: %lu\n",
	    ipp, chfs_ihashtbl, chfs_ihash);
	LIST_FOREACH(ip, ipp, hash_entry) {
		dbg("ip: %p\n", ip);
		if (inum == ip->ino && dev == ip->dev) {
			vp = ITOV(ip);
			KASSERT(vp != NULL);
			if (VOP_ISLOCKED(vp) == LK_EXCLUSIVE) {
				mutex_exit(&chfs_ihash_lock);
				goto loop;
			}
			if (flags == 0) {
				mutex_exit(&chfs_ihash_lock);
			} else {
				mutex_enter(vp->v_interlock);
				mutex_exit(&chfs_ihash_lock);
				if (vget(vp, flags))
					goto loop;
			}
			return (vp);
		}
	}
	/* Not found: drop the lock and report failure. */
	mutex_exit(&chfs_ihash_lock);
	return (NULL);
}
/*
 * Remove the inode from the hash table.
 */
void
ext2_ihashrem(struct inode *ip)
{
	struct inode **ipp;
	struct inode *iq;

	lwkt_gettoken(&ext2_ihash_token);
	if (ip->i_flag & IN_HASHED) {
		ipp = INOHASH(ip->i_dev, ip->i_number);
		while ((iq = *ipp) != NULL) {
			if (ip == iq)
				break;
			ipp = &iq->i_next;
		}
		KKASSERT(ip == iq);
		*ipp = ip->i_next;
		ip->i_next = NULL;
		ip->i_flag &= ~IN_HASHED;
	}
	lwkt_reltoken(&ext2_ihash_token);
}
/*
 * Insert the inode into the hash table, and return it locked.
 */
int
ext2_ihashins(struct inode *ip)
{
	struct inode **ipp;
	struct inode *iq;

	KKASSERT((ip->i_flag & IN_HASHED) == 0);
	lwkt_gettoken(&ext2_ihash_token);
	ipp = INOHASH(ip->i_dev, ip->i_number);
	while ((iq = *ipp) != NULL) {
		if (ip->i_dev == iq->i_dev && ip->i_number == iq->i_number) {
			lwkt_reltoken(&ext2_ihash_token);
			return(EBUSY);
		}
		ipp = &iq->i_next;
	}
	ip->i_next = NULL;
	*ipp = ip;
	ip->i_flag |= IN_HASHED;
	lwkt_reltoken(&ext2_ihash_token);
	return(0);
}
/*
 * Use the device/inum pair to find the incore inode, and return a pointer
 * to it. If it is in core, return it, even if it is locked.
 */
struct vnode *
chfs_ihashlookup(dev_t dev, ino_t inum)
{
	struct chfs_inode *ip;
	struct ihashhead *ipp;

	dbg("dev: %ju, inum: %ju\n", (uintmax_t)dev, (uintmax_t)inum);

	KASSERT(mutex_owned(&chfs_ihash_lock));

	ipp = &chfs_ihashtbl[INOHASH(dev, inum)];
	LIST_FOREACH(ip, ipp, hash_entry) {
		if (inum == ip->ino && dev == ip->dev)
			break;
	}
	if (ip)
		return (ITOV(ip));
	return (NULLVP);
}
void
efs_ihashreinit(void)
{
	struct efs_inode *eip;
	struct ihashhead *oldhash, *hash;
	u_long oldmask, mask, val;
	int i;

	hash = hashinit(desiredvnodes, HASH_LIST, true, &mask);
	mutex_enter(&efs_ihash_lock);
	oldhash = ihashtbl;
	oldmask = ihash;
	ihashtbl = hash;
	ihash = mask;
	for (i = 0; i <= oldmask; i++) {
		while ((eip = LIST_FIRST(&oldhash[i])) != NULL) {
			LIST_REMOVE(eip, ei_hash);
			val = INOHASH(eip->ei_dev, eip->ei_number);
			LIST_INSERT_HEAD(&hash[val], eip, ei_hash);
		}
	}
	mutex_exit(&efs_ihash_lock);
	hashdone(oldhash, HASH_LIST, oldmask);
}