/*
 * Drop one I/O reference on the inode; if it was the last outstanding
 * reference, wake anyone sleeping in vn_iowait() on this inode's
 * sync wait queue.
 */
void
vn_iowake(
	xfs_inode_t	*ip)
{
	if (!atomic_dec_and_test(&ip->i_iocount))
		return;
	wake_up(vptosync(ip));
}
/*
 * Drop one I/O reference on the vnode; if it was the last outstanding
 * reference, wake anyone sleeping in vn_iowait() on this vnode's
 * sync wait queue.
 */
void
vn_iowake(
	struct vnode	*vp)
{
	if (!atomic_dec_and_test(&vp->v_iocount))
		return;
	wake_up(vptosync(vp));
}
/*
 * Sleep until all outstanding I/O references on the inode have been
 * dropped (i_iocount reaches zero).  Woken by vn_iowake().
 */
void
vn_iowait(
	xfs_inode_t	*ip)
{
	wait_event(*vptosync(ip), (atomic_read(&ip->i_iocount) == 0));
}
void vn_iowait( struct vnode *vp) { wait_queue_head_t *wq = vptosync(vp); wait_event(*wq, (atomic_read(&vp->v_iocount) == 0)); }
/*
 * Purge a vnode from the cache.
 * At this point the vnode is guaranteed to have no references (vn_count == 0).
 * The caller has to make sure that there are no ways someone could
 * get a handle (via vn_get) on the vnode (usually done via a mount/vfs lock).
 */
void
vn_purge(
	struct vnode	*vp,
	vmap_t		*vmap)
{
	vn_trace_entry(vp, "vn_purge", (inst_t *)__return_address);

again:
	/*
	 * Check whether vp has already been reclaimed since our caller
	 * sampled its version while holding a filesystem cache lock that
	 * its VOP_RECLAIM function acquires.  A version mismatch means
	 * this vnode slot has been recycled; nothing left to do.
	 */
	VN_LOCK(vp);
	if (vp->v_number != vmap->v_number) {
		VN_UNLOCK(vp, 0);
		return;
	}

	/*
	 * If vp is being reclaimed or inactivated, wait until it is inert,
	 * then proceed.  Can't assume that vnode is actually reclaimed
	 * just because the reclaimed flag is asserted -- a vn_alloc
	 * reclaim can fail.
	 *
	 * NOTE(review): sv_wait is expected to drop vp->v_lock while
	 * sleeping, hence the full re-check via "goto again" afterwards
	 * (VN_LOCK is re-taken at the top of the loop) — confirm against
	 * the sv_wait implementation.
	 */
	if (vp->v_flag & (VINACT | VRECLM)) {
		ASSERT(vn_count(vp) == 0);
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		goto again;
	}

	/*
	 * Another process could have raced in and gotten this vnode...
	 */
	if (vn_count(vp) > 0) {
		VN_UNLOCK(vp, 0);
		return;
	}

	/* Mark the vnode as mid-reclaim before dropping the lock so that
	 * concurrent lookers-up see VRECLM and wait in the branch above. */
	XFS_STATS_DEC(vn_active);
	vp->v_flag |= VRECLM;
	VN_UNLOCK(vp, 0);

	/*
	 * Call VOP_RECLAIM and clean vp.  The FSYNC_INVAL flag tells
	 * vp's filesystem to flush and invalidate all cached resources.
	 * When vn_reclaim returns, vp should have no private data,
	 * either in a system cache or attached to v_data.
	 */
	if (vn_reclaim(vp) != 0)
		panic("vn_purge: cannot reclaim");

	/*
	 * Wakeup anyone waiting for vp to be reclaimed.
	 * (vn_wakeup also clears VRECLM/VWAIT/VMODIFIED under v_lock.)
	 */
	vn_wakeup(vp);
}
/*
 * Wake anyone sleeping on this vnode's sync queue (set via the VWAIT
 * flag) and clear the transient reclaim/wait/modified state.  The
 * broadcast and the flag clearing are both done under v_lock so a
 * waiter cannot set VWAIT between the test and the clear.
 */
STATIC void
vn_wakeup(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & VWAIT)
		sv_broadcast(vptosync(vp));
	vp->v_flag &= ~(VRECLM|VWAIT|VMODIFIED);
	VN_UNLOCK(vp, 0);
}
/*
 * If the vnode is currently being inactivated or reclaimed, sleep
 * until that completes and return 1; otherwise return 0 immediately.
 *
 * Returns: 1 if we slept (caller should re-examine the vnode),
 *          0 if the vnode was already inert.
 *
 * NOTE(review): the sleeping path returns without VN_UNLOCK — this
 * relies on sv_wait releasing vp->v_lock on the caller's behalf
 * (the same convention vn_purge depends on); confirm against the
 * sv_wait implementation.
 */
int
vn_wait(
	struct vnode	*vp)
{
	VN_LOCK(vp);
	if (vp->v_flag & (VINACT | VRECLM)) {
		vp->v_flag |= VWAIT;
		sv_wait(vptosync(vp), PINOD, &vp->v_lock, 0);
		return 1;
	}
	VN_UNLOCK(vp, 0);
	return 0;
}
/* * Call VOP_INACTIVE on last reference. */ void vn_rele( struct vnode *vp) { int vcnt; int cache; XFS_STATS_INC(vn_rele); VN_LOCK(vp); vn_trace_entry(vp, "vn_rele", (inst_t *)__return_address); vcnt = vn_count(vp); /* * Since we always get called from put_inode we know * that i_count won't be decremented after we * return. */ if (!vcnt) { /* * As soon as we turn this on, noone can find us in vn_get * until we turn off VINACT or VRECLM */ vp->v_flag |= VINACT; VN_UNLOCK(vp, 0); /* * Do not make the VOP_INACTIVE call if there * are no behaviors attached to the vnode to call. */ if (vp->v_fbhv) VOP_INACTIVE(vp, NULL, cache); VN_LOCK(vp); if (vp->v_flag & VWAIT) sv_broadcast(vptosync(vp)); vp->v_flag &= ~(VINACT|VWAIT|VRECLM|VMODIFIED); } VN_UNLOCK(vp, 0); vn_trace_exit(vp, "vn_rele", (inst_t *)__return_address); }