/*===========================================================================* * put_vnode * *===========================================================================*/ PUBLIC void put_vnode(struct vnode *vp) { /* Decrease vnode's usage counter and decrease inode's usage counter in the * corresponding FS process. Decreasing the fs_count each time we decrease the * ref count would lead to poor performance. Instead, only decrease fs_count * when the ref count hits zero. However, this could lead to fs_count to wrap. * To prevent this, we drop the counter to 1 when the counter hits 256. * We maintain fs_count as a sanity check to make sure VFS and the FS are in * sync. */ int r, lock_vp; ASSERTVP(vp); /* Lock vnode. It's quite possible this thread already has a lock on this * vnode. That's no problem, because the reference counter will not decrease * to zero in that case. However, if the counter does decrease to zero *and* * is already locked, we have a consistency problem somewhere. */ lock_vp = lock_vnode(vp, VNODE_OPCL); if (vp->v_ref_count > 1) { /* Decrease counter */ vp->v_ref_count--; if (vp->v_fs_count > 256) vnode_clean_refs(vp); if (lock_vp != EBUSY) unlock_vnode(vp); return; } /* If we already had a lock, there is a consistency problem */ assert(lock_vp != EBUSY); tll_upgrade(&vp->v_lock); /* Make sure nobody else accesses this vnode */ /* A vnode that's not in use can't be put back. */ if (vp->v_ref_count <= 0) panic("put_vnode failed: bad v_ref_count %d\n", vp->v_ref_count); /* fs_count should indicate that the file is in use. */ if (vp->v_fs_count <= 0) panic("put_vnode failed: bad v_fs_count %d\n", vp->v_fs_count); /* Tell FS we don't need this inode to be open anymore. */ r = req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count); if (r != OK) { printf("VFS: putnode failed: %d\n", r); util_stacktrace(); } /* This inode could've been mapped. If so, tell mapped FS to close it as * well. If mapped onto same FS, this putnode is not needed. 
*/ if (vp->v_mapfs_e != NONE && vp->v_mapfs_e != vp->v_fs_e) req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count); vp->v_fs_count = 0; vp->v_ref_count = 0; vp->v_mapfs_count = 0; unlock_vnode(vp); }
/*===========================================================================*
 *				dup_vnode				     *
 *===========================================================================*/
void dup_vnode(struct vnode *vp)
{
/* Take one extra reference on the given vnode, and thereby on the inode it
 * stands for in the owning FS process. */

  ASSERTVP(vp);

  vp->v_ref_count += 1;
}
/*===========================================================================*
 *				is_vnode_locked				     *
 *===========================================================================*/
PUBLIC int is_vnode_locked(struct vnode *vp)
{
/* Report whether some thread currently holds a lock on this vnode, or is
 * waiting to obtain one. Returns 1 if so, 0 otherwise. */

  ASSERTVP(vp);

  /* Short-circuit: an already-held lock makes the pending check redundant. */
  if (tll_islocked(&vp->v_lock))
	return(1);

  return(tll_haspendinglock(&vp->v_lock) ? 1 : 0);
}
/*===========================================================================*
 *				put_vnode				     *
 *===========================================================================*/
/* NOTE(review): this is a SECOND definition of put_vnode in this file. It is
 * an older revision of the function defined above (no tll vnode locking, and
 * the legacy three-argument panic(__FILE__, ..., NO_NUM) form). Two external
 * definitions of the same symbol cannot compile/link together — confirm which
 * revision is current and remove the other. */
PUBLIC void put_vnode(struct vnode *vp)
{
/* Decrease vnode's usage counter and decrease inode's usage counter in the
 * corresponding FS process. Decreasing the fs_count each time we decrease the
 * ref count would lead to poor performance. Instead, only decrease fs_count
 * when the ref count hits zero. However, this could lead to fs_count to wrap.
 * To prevent this, we drop the counter to 1 when the counter hits 256.
 * We maintain fs_count as a sanity check to make sure VFS and the FS are in
 * sync.
 */
  ASSERTVP(vp);

  if (vp->v_ref_count > 1) {
	/* Decrease counter */
	vp->v_ref_count--;
	/* Resync with the FS before v_fs_count can wrap (see above). */
	if (vp->v_fs_count > 256) vnode_clean_refs(vp);
	return;
  }

  /* A vnode that's not in use can't be put. */
  if (vp->v_ref_count <= 0) {
	printf("put_vnode: bad v_ref_count %d\n", vp->v_ref_count);
	panic(__FILE__, "put_vnode failed", NO_NUM);
  }

  /* fs_count should indicate that the file is in use. */
  if (vp->v_fs_count <= 0) {
	printf("put_vnode: bad v_fs_count %d\n", vp->v_fs_count);
	panic(__FILE__, "put_vnode failed", NO_NUM);
  }

  /* Tell FS we don't need this inode to be open anymore. */
  req_putnode(vp->v_fs_e, vp->v_inode_nr, vp->v_fs_count);

  /* This inode could've been mapped. If so, tell PFS to close it as well. */
  if(vp->v_mapfs_e != 0 && vp->v_mapinode_nr != vp->v_inode_nr &&
     vp->v_mapfs_e != vp->v_fs_e) {
	req_putnode(vp->v_mapfs_e, vp->v_mapinode_nr, vp->v_mapfs_count);
  }

  /* Mark the slot as free; this revision also resets pipe/device fields,
   * which the locking revision above does not touch. */
  vp->v_fs_count = 0;
  vp->v_ref_count = 0;
  vp->v_pipe = NO_PIPE;
  vp->v_sdev = NO_DEV;
  vp->v_mapfs_e = 0;
  vp->v_mapinode_nr = 0;
  vp->v_mapfs_count = 0;
}
/*===========================================================================*
 *				lock_vnode				     *
 *===========================================================================*/
PUBLIC int lock_vnode(struct vnode *vp, tll_access_t locktype)
{
/* Acquire a lock of the requested type on this vnode's three-level lock.
 * Returns EBUSY when tll_lock reports it; every other outcome is collapsed
 * to OK. */
  int rc;

  ASSERTVP(vp);

  rc = tll_lock(&vp->v_lock, locktype);

#if LOCK_DEBUG
  /* NOTE(review): this counter is bumped even when rc == EBUSY — confirm
   * that is the intended bookkeeping for the debug read-lock count. */
  if (locktype == VNODE_READ) {
	fp->fp_vp_rdlocks++;
  }
#endif

  return(rc == EBUSY ? EBUSY : OK);
}
/*===========================================================================*
 *				unlock_vnode				     *
 *===========================================================================*/
PUBLIC void unlock_vnode(struct vnode *vp)
{
/* Release this thread's lock on the given vnode. Under LOCK_DEBUG, first
 * perform sanity checks on the global lock bookkeeping. */
#if LOCK_DEBUG
  int i;
  register struct vnode *rvp;
  struct worker_thread *w;
#endif

  ASSERTVP(vp);

#if LOCK_DEBUG
  /* Decrease read-only lock counter when not locked as VNODE_OPCL or
   * VNODE_WRITE */
  if (!tll_locked_by_me(&vp->v_lock)) {
	fp->fp_vp_rdlocks--;
  }

  /* Walk the whole vnode table and assert that this worker thread (self) is
   * not still queued anywhere: neither on a write-waiter chain nor on a
   * serial-waiter chain of any vnode's lock. */
  for (i = 0; i < NR_VNODES; i++) {
	rvp = &vnode[i];

	/* Check the write-access waiter queue. */
	w = rvp->v_lock.t_write;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}

	/* Check the serialized-access waiter queue. */
	w = rvp->v_lock.t_serial;
	assert(w != self);
	while (w && w->w_next != NULL) {
		w = w->w_next;
		assert(w != self);
	}
  }
#endif

  tll_unlock(&vp->v_lock);
}
/*===========================================================================*
 *			upgrade_vnode_lock				     *
 *===========================================================================*/
void upgrade_vnode_lock(struct vnode *vp)
{
/* Promote the caller's existing lock on this vnode to exclusive (write)
 * access. (Banner previously read just "vnode"; corrected.) */

  ASSERTVP(vp);

  tll_upgrade(&vp->v_lock);
}