static int
vnop_remove_9p(struct vnop_remove_args *ap)
{
	vnode_t dvp, vp;
	node_9p *dnp, *np;
	int e;

	TRACE();
	dvp = ap->a_dvp;
	vp = ap->a_vp;
	dnp = NTO9P(dvp);
	np = NTO9P(vp);
	if (dvp == vp) {
		panic("parent == node");
		return EINVAL;
	}

	if (ISSET(ap->a_flags, VNODE_REMOVE_NODELETEBUSY) && vnode_isinuse(vp, 0))
		return EBUSY;

	nlock_9p(dnp, NODE_LCK_EXCLUSIVE);
	nlock_9p(np, NODE_LCK_EXCLUSIVE);
	if ((e = remove_9p(np->nmp, np->fid)))
		goto error;

	cache_purge(vp);
	vnode_recycle(vp);

error:
	nunlock_9p(np);
	nunlock_9p(dnp);
	return e;
}
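/*
 * The remove path above follows a common VFS shape: lock the parent
 * before the child, do the work, and funnel every exit through one
 * label that unlocks in reverse order.  A minimal sketch of that shape,
 * assuming the nlock_9p/nunlock_9p primitives from the code above
 * (do_work_9p is a hypothetical stand-in for the real operation):
 */
static int
locked_pair_op_9p(node_9p *dnp, node_9p *np)
{
	int e;

	nlock_9p(dnp, NODE_LCK_EXCLUSIVE);	/* parent first */
	nlock_9p(np, NODE_LCK_EXCLUSIVE);	/* then child */
	if ((e = do_work_9p(np)))
		goto error;
	/* success-only work goes here */
error:
	nunlock_9p(np);				/* unlock in reverse order */
	nunlock_9p(dnp);
	return e;
}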
/*
 * Make a new or get an existing nullfs node.
 * Vp is the alias vnode, lowervp is the lower vnode.
 *
 * lowervp is assumed to have an iocount on it from the caller.
 */
int
null_nodeget(struct mount *mp, struct vnode *lowervp, struct vnode *dvp,
    struct vnode **vpp, struct componentname *cnp, int root)
{
	struct vnode *vp;
	int error;

	/* Look up the hash first. */
	error = null_hashget(mp, lowervp, vpp);
	/*
	 * ENOENT means it wasn't found, EIO is a failure we should bail
	 * from, 0 means it was found.
	 */
	if (error != ENOENT) {
		/*
		 * null_hashget checked the vid, so if we got something here
		 * it's legit to the best of our knowledge.
		 *
		 * If we found something then there is an iocount on *vpp;
		 * if we didn't, *vpp must not be used by the caller.
		 */
		return error;
	}

	/*
	 * We do not serialize vnode creation; instead we check for
	 * duplicates later, when adding the new vnode to the hash.
	 */

	/* Take a ref on lowervp so we let the system know we care about it. */
	error = vnode_ref(lowervp);
	if (error) {
		/* Failed to get a reference on the lower vp, so bail.
		 * lowervp may be gone already. */
		return error;
	}

	error = null_getnewvnode(mp, lowervp, dvp, &vp, cnp, root);
	if (error) {
		vnode_rele(lowervp);
		return (error);
	}

	/*
	 * Atomically insert our new node into the hash, or vget the
	 * existing one if someone else has beaten us to it.
	 */
	error = null_hashins(mp, VTONULL(vp), vpp);
	if (error || *vpp != NULL) {
		/* recycle will call reclaim which will get rid of the internals */
		vnode_recycle(vp);
		vnode_put(vp);
		/* if we found *vpp, then null_hashins put an iocount on it */
		return error;
	}

	/* vp has an iocount from null_getnewvnode */
	*vpp = vp;
	return (0);
}
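/*
 * A hedged usage sketch (null_lookup_alias is hypothetical, not part of
 * the nullfs sources): a lookup path that already holds an iocount on
 * lowervp calls null_nodeget and, on success, receives *vpp carrying
 * its own iocount.  The caller's iocount on lowervp is untouched in
 * either outcome and remains the caller's to drop.
 */
static int
null_lookup_alias(struct mount *mp, struct vnode *lowervp, struct vnode *dvp,
    struct componentname *cnp, struct vnode **vpp)
{
	int error = null_nodeget(mp, lowervp, dvp, vpp, cnp, 0 /* not root */);

	vnode_put(lowervp);	/* drop the caller's iocount in both cases */
	return error;
}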
static int
devfs_inactive(__unused struct vnop_inactive_args *ap)
{
	vnode_t vp = ap->a_vp;
	devnode_t *dnp = VTODN(vp);

	/*
	 * Cloned vnodes are not linked in anywhere, so they
	 * can just be recycled.
	 */
	if (dnp->dn_clone != NULL) {
		vnode_recycle(vp);
	}
	return (0);
}
__private_extern__
void
fuse_internal_vnode_disappear(vnode_t vp, vfs_context_t context, int how)
{
	int err = 0;

	fuse_vncache_purge(vp);

	if (how != REVOKE_NONE) {
		err = fuse_internal_revoke(vp, REVOKEALL, context, how);
		if (err) {
			IOLog("MacFUSE: disappearing act: revoke failed (%d)\n", err);
		}

		err = vnode_recycle(vp);
		if (err) {
			IOLog("MacFUSE: disappearing act: recycle failed (%d)\n", err);
		}
	}
}
__private_extern__
void
fuse_internal_vnode_disappear(vnode_t vp, vfs_context_t context, int how)
{
	int err = 0;

	fuse_vncache_purge(vp);

	if (how != REVOKE_NONE) {
		err = fuse_internal_revoke(vp, REVOKEALL, context, how);
		if (err) {
			IOLog("MacFUSE: disappearing act: revoke failed (%d)\n", err);
		}

#if __LP64__
		/* Check whether the vnode is in the process of being recycled
		 * to avoid the 'vnode reclaim in progress' kernel panic.
		 *
		 * Obviously this is a quick fix done without much understanding of
		 * the code flow of a recycle operation, but it seems that we
		 * shouldn't call this again if a recycle operation was the reason
		 * that we got here.
		 */
		if (!vnode_isrecycled(vp)) {
#endif
			err = vnode_recycle(vp);
			if (err) {
				IOLog("MacFUSE: disappearing act: recycle failed (%d)\n", err);
			}
#if __LP64__
		} else {
			IOLog("MacFUSE: Avoided 'vnode reclaim in progress' kernel "
			      "panic. What now?\n");
		}
#endif
	}
}
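/*
 * The guard above generalizes: vnode_recycle() should not be re-entered
 * for a vnode whose reclaim is already in progress.  A minimal sketch of
 * the same check as a helper (safe_recycle is a hypothetical name; the
 * vnode_isrecycled()/vnode_recycle() KPIs are the ones used above):
 */
static int
safe_recycle(vnode_t vp)
{
	if (vnode_isrecycled(vp))
		return 0;	/* reclaim already in progress; nothing to do */
	return vnode_recycle(vp);
}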
/*ARGSUSED*/
static int
zfs_vfs_unmount(struct mount *mp, int mntflags, vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	objset_t *os = zfsvfs->z_os;
	znode_t *zp, *nextzp;
	int ret, i;
	int flags;

	/* XXX NOEL: delegation admin stuff, add back if we use delg. admin */
#if 0
	ret = 0; /* UNDEFINED: secpolicy_fs_unmount(cr, vfsp); */
	if (ret) {
		ret = dsl_deleg_access((char *)refstr_value(vfsp->vfs_resource),
		    ZFS_DELEG_PERM_MOUNT, cr);
		if (ret)
			return (ret);
	}

	/*
	 * We purge the parent filesystem's vfsp as the parent filesystem
	 * and all of its snapshots have their vnode's v_vfsp set to the
	 * parent's filesystem's vfsp.  Note, 'z_parent' is self
	 * referential for non-snapshots.
	 */
	(void) dnlc_purge_vfsp(zfsvfs->z_parent->z_vfs, 0);
#endif

	/*
	 * Unmount any snapshots mounted under .zfs before unmounting the
	 * dataset itself.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL &&
	    (ret = zfsctl_umount_snapshots(vfsp, fflag, cr)) != 0)
		return (ret);
#endif

	flags = SKIPSYSTEM;
	if (mntflags & MNT_FORCE)
		flags |= FORCECLOSE;

	ret = vflush(mp, NULLVP, flags);

	/*
	 * Mac OS X needs a file system modify time.
	 *
	 * We use the mtime of the "com.apple.system.mtime"
	 * extended attribute, which is associated with the
	 * file system root directory.
	 *
	 * Here we need to release the ref we took on z_mtime_vp during mount.
	 */
	if ((ret == 0) || (mntflags & MNT_FORCE)) {
		if (zfsvfs->z_mtime_vp != NULL) {
			struct vnode *mvp;

			mvp = zfsvfs->z_mtime_vp;
			zfsvfs->z_mtime_vp = NULL;

			if (vnode_get(mvp) == 0) {
				vnode_rele(mvp);
				vnode_recycle(mvp);
				vnode_put(mvp);
			}
		}
	}

	if (!(mntflags & MNT_FORCE)) {
		/*
		 * Check the number of active vnodes in the file system.
		 * Our count is maintained in the vfs structure, but the
		 * number is off by 1 to indicate a hold on the vfs
		 * structure itself.
		 *
		 * The '.zfs' directory maintains a reference of its
		 * own, and any active references underneath are
		 * reflected in the vnode count.
		 */
		if (ret)
			return (EBUSY);
#if 0
		if (zfsvfs->z_ctldir == NULL) {
			if (vfsp->vfs_count > 1)
				return (EBUSY);
		} else {
			if (vfsp->vfs_count > 2 ||
			    zfsvfs->z_ctldir->v_count > 1)
				return (EBUSY);
		}
#endif
	}

	rw_enter(&zfsvfs->z_unmount_lock, RW_WRITER);
	rw_enter(&zfsvfs->z_unmount_inactive_lock, RW_WRITER);

	/*
	 * At this point there are no vops active, and any new vops will
	 * fail with EIO since we have z_unmount_lock for writer (only
	 * relevant for forced unmount).
	 *
	 * Release all holds on dbufs.
	 * Note, the dmu can still callback via znode_pageout_func()
	 * which can zfs_znode_free() the znode.  So we lock
	 * z_all_znodes; search the list for a held dbuf; drop the lock
	 * (we know zp can't disappear if we hold a dbuf lock) then
	 * regrab the lock and restart.
	 */
	mutex_enter(&zfsvfs->z_znodes_lock);
	for (zp = list_head(&zfsvfs->z_all_znodes); zp; zp = nextzp) {
		nextzp = list_next(&zfsvfs->z_all_znodes, zp);
		if (zp->z_dbuf_held) {
			/* dbufs should only be held when force unmounting */
			zp->z_dbuf_held = 0;
			mutex_exit(&zfsvfs->z_znodes_lock);
			dmu_buf_rele(zp->z_dbuf, NULL);
			/* Start again */
			mutex_enter(&zfsvfs->z_znodes_lock);
			nextzp = list_head(&zfsvfs->z_all_znodes);
		}
	}
	mutex_exit(&zfsvfs->z_znodes_lock);

	/*
	 * Set the unmounted flag and let new vops unblock.
	 * zfs_inactive will have the unmounted behavior, and all other
	 * vops will fail with EIO.
	 */
	zfsvfs->z_unmounted = B_TRUE;
	rw_exit(&zfsvfs->z_unmount_lock);
	rw_exit(&zfsvfs->z_unmount_inactive_lock);

	/*
	 * Unregister properties.
	 */
#ifndef __APPLE__
	if (!dmu_objset_is_snapshot(os))
		zfs_unregister_callbacks(zfsvfs);
#endif

	/*
	 * Close the zil.
	 * NB: Can't close the zil while zfs_inactive threads are
	 * blocked, as zil_close can call zfs_inactive.
	 */
	if (zfsvfs->z_log) {
		zil_close(zfsvfs->z_log);
		zfsvfs->z_log = NULL;
	}

	/*
	 * Evict all dbufs so that cached znodes will be freed.
	 */
	if (dmu_objset_evict_dbufs(os, B_TRUE)) {
		txg_wait_synced(dmu_objset_pool(zfsvfs->z_os), 0);
		(void) dmu_objset_evict_dbufs(os, B_FALSE);
	}

	/*
	 * Finally close the objset.
	 */
	dmu_objset_close(os);

	/*
	 * We can now safely destroy the '.zfs' directory node.
	 */
#if 0
	if (zfsvfs->z_ctldir != NULL)
		zfsctl_destroy(zfsvfs);
#endif

	/*
	 * Note that this work is normally done in zfs_freevfs, but since
	 * there is no VOP_FREEVFS in OSX, we free VFS items here.
	 */
	OSDecrementAtomic((SInt32 *)&zfs_active_fs_count);
	for (i = 0; i != ZFS_OBJ_MTX_SZ; i++)
		mutex_destroy(&zfsvfs->z_hold_mtx[i]);
	mutex_destroy(&zfsvfs->z_znodes_lock);
	list_destroy(&zfsvfs->z_all_znodes);
	rw_destroy(&zfsvfs->z_unmount_lock);
	rw_destroy(&zfsvfs->z_unmount_inactive_lock);

	return (0);
}

struct vnode* vnode_getparent(struct vnode *vp);  /* sys/vnode_internal.h */

static int
zfs_vget_internal(zfsvfs_t *zfsvfs, ino64_t ino, struct vnode **vpp)
{
	struct vnode	*vp;
	struct vnode	*dvp = NULL;
	znode_t		*zp;
	int		error;

	*vpp = NULL;

	/*
	 * On Mac OS X we always export the root directory id as 2
	 * and its parent as 1.
	 */
	if (ino == 2 || ino == 1)
		ino = zfsvfs->z_root;

	if ((error = zfs_zget(zfsvfs, ino, &zp)))
		goto out;

	/* Don't expose EA objects! */
	if (zp->z_phys->zp_flags & ZFS_XATTR) {
		vnode_put(ZTOV(zp));
		error = ENOENT;
		goto out;
	}

	*vpp = vp = ZTOV(zp);

	if (vnode_isvroot(vp))
		goto out;

	/*
	 * If this znode didn't just come from the cache then
	 * it won't have a valid identity (parent and name).
	 *
	 * Manually fix its identity here (normally done by namei lookup).
	 */
	if ((dvp = vnode_getparent(vp)) == NULL) {
		if (zp->z_phys->zp_parent != 0 &&
		    zfs_vget_internal(zfsvfs, zp->z_phys->zp_parent, &dvp)) {
			goto out;
		}
		if (vnode_isdir(dvp)) {
			char objname[ZAP_MAXNAMELEN];	/* 256 bytes */
			int flags = VNODE_UPDATE_PARENT;

			/* Look for znode's name in its parent's zap */
			if (zap_value_search(zfsvfs->z_os,
			    zp->z_phys->zp_parent, zp->z_id,
			    ZFS_DIRENT_OBJ(-1ULL), objname) == 0) {
				flags |= VNODE_UPDATE_NAME;
			}

			/* Update the znode's parent and name */
			vnode_update_identity(vp, dvp, objname, 0, 0, flags);
		}
	}
	/* All done with znode's parent */
	vnode_put(dvp);
out:
	return (error);
}

/*
 * Get a vnode from a file id (ignoring the generation).
 *
 * Used by NFS Server (readdirplus) and VFS (build_path).
 */
static int
zfs_vfs_vget(struct mount *mp, ino64_t ino, struct vnode **vpp,
    __unused vfs_context_t context)
{
	zfsvfs_t *zfsvfs = vfs_fsprivate(mp);
	int error;

	ZFS_ENTER(zfsvfs);

	/*
	 * On Mac OS X we always export the root directory id as 2.
	 * So we don't expect to see the real root directory id
	 * from the zfs_vfs_vget KPI (unless of course the real id was
	 * already 2).
	 */
	if ((ino == zfsvfs->z_root) && (zfsvfs->z_root != 2)) {
		ZFS_EXIT(zfsvfs);
		return (ENOENT);
	}
	error = zfs_vget_internal(zfsvfs, ino, vpp);

	ZFS_EXIT(zfsvfs);
	return (error);
}
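/*
 * zfs_vfs_unmount above releases its long-held mtime vnode with a
 * get/rele/recycle/put sequence.  The idiom as a standalone sketch
 * (drop_held_vnode is a hypothetical helper, not part of the sources):
 * take a transient iocount so the vnode cannot be reclaimed
 * mid-sequence, drop the long-term usecount, ask for recycling, then
 * drop the iocount.
 */
static void
drop_held_vnode(struct vnode *vp)
{
	if (vnode_get(vp) == 0) {	/* transient iocount */
		vnode_rele(vp);		/* drop the long-term usecount */
		vnode_recycle(vp);	/* mark for reclaim */
		vnode_put(vp);		/* drop the transient iocount */
	}
}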
/*
 * Mount null layer
 */
static int
nullfs_mount(struct mount *mp, __unused vnode_t devvp, user_addr_t user_data,
    vfs_context_t ctx)
{
	int error                 = 0;
	struct vnode *lowerrootvp = NULL, *vp = NULL;
	struct vfsstatfs *sp      = NULL;
	struct null_mount *xmp    = NULL;
	char data[MAXPATHLEN];
	size_t count;
	struct vfs_attr vfa;
	/* set defaults (arbitrary since this file system is readonly) */
	uint32_t bsize  = BLKDEV_IOSIZE;
	size_t iosize   = BLKDEV_IOSIZE;
	uint64_t blocks = 4711 * 4711;
	uint64_t bfree  = 0;
	uint64_t bavail = 0;
	uint64_t bused  = 4711;
	uint64_t files  = 4711;
	uint64_t ffree  = 0;
	kauth_cred_t cred = vfs_context_ucred(ctx);

	NULLFSDEBUG("nullfs_mount(mp = %p) %llx\n", (void *)mp, vfs_flags(mp));

	if (vfs_flags(mp) & MNT_ROOTFS)
		return (EOPNOTSUPP);

	/*
	 * Update is a no-op
	 */
	if (vfs_isupdate(mp)) {
		return ENOTSUP;
	}

	/* check entitlement */
	if (!IOTaskHasEntitlement(current_task(), NULLFS_ENTITLEMENT)) {
		return EPERM;
	}

	/*
	 * Get argument
	 */
	error = copyinstr(user_data, data, MAXPATHLEN - 1, &count);
	if (error) {
		NULLFSDEBUG("nullfs: error copying data from user %d\n", error);
		goto error;
	}

	/* This could happen if the system is configured for 32 bit inodes
	 * instead of 64 bit */
	if (count > MAX_MNT_FROM_LENGTH) {
		error = EINVAL;
		NULLFSDEBUG("nullfs: path to translocate too large for this system %d vs %d\n",
		    (int)count, MAX_MNT_FROM_LENGTH);
		goto error;
	}

	error = vnode_lookup(data, 0, &lowerrootvp, ctx);
	if (error) {
		NULLFSDEBUG("lookup %s -> %d\n", data, error);
		goto error;
	}

	/* lowerrootvp has an iocount after vnode_lookup; trade it for a
	 * usecount. Keep it to signal that we want to keep around the thing
	 * we are mirroring. Drop it in unmount. */
	error = vnode_ref(lowerrootvp);
	vnode_put(lowerrootvp);
	if (error) {
		/* vnode_ref failed, so null it out so it can't be used
		 * anymore in cleanup. */
		lowerrootvp = NULL;
		goto error;
	}

	NULLFSDEBUG("mount %s\n", data);

	MALLOC(xmp, struct null_mount *, sizeof(*xmp), M_TEMP, M_WAITOK | M_ZERO);
	if (xmp == NULL) {
		error = ENOMEM;
		goto error;
	}

	/*
	 * Save reference to underlying FS
	 */
	xmp->nullm_lowerrootvp  = lowerrootvp;
	xmp->nullm_lowerrootvid = vnode_vid(lowerrootvp);

	error = null_getnewvnode(mp, NULL, NULL, &vp, NULL, 1);
	if (error) {
		goto error;
	}

	/* vp has an iocount on it from vnode_create. Drop that for a usecount.
	 * This is our root vnode, so we drop the ref in unmount.
	 *
	 * Assuming for now that because we created this vnode and we aren't
	 * finished mounting, we can get a ref. */
	vnode_ref(vp);
	vnode_put(vp);

	error = nullfs_init_lck(&xmp->nullm_lock);
	if (error) {
		goto error;
	}

	xmp->nullm_rootvp = vp;

	/* read the flags the user set, but then ignore some of them; we will
	 * only allow them if they are set on the lower file system */
	uint64_t flags      = vfs_flags(mp) & (~(MNT_IGNORE_OWNERSHIP | MNT_LOCAL));
	uint64_t lowerflags = vfs_flags(vnode_mount(lowerrootvp)) &
	    (MNT_LOCAL | MNT_QUARANTINE | MNT_IGNORE_OWNERSHIP | MNT_NOEXEC);

	if (lowerflags) {
		flags |= lowerflags;
	}

	/* force these flags */
	flags |= (MNT_DONTBROWSE | MNT_MULTILABEL | MNT_NOSUID | MNT_RDONLY);
	vfs_setflags(mp, flags);

	vfs_setfsprivate(mp, xmp);
	vfs_getnewfsid(mp);
	vfs_setlocklocal(mp);

	/* fill in the stat block */
	sp = vfs_statfs(mp);
	strlcpy(sp->f_mntfromname, data, MAX_MNT_FROM_LENGTH);

	sp->f_flags = flags;

	xmp->nullm_flags = NULLM_CASEINSENSITIVE; /* default to case insensitive */

	error = nullfs_vfs_getlowerattr(vnode_mount(lowerrootvp), &vfa, ctx);
	if (error == 0) {
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bsize)) {
			bsize = vfa.f_bsize;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_iosize)) {
			iosize = vfa.f_iosize;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_blocks)) {
			blocks = vfa.f_blocks;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bfree)) {
			bfree = vfa.f_bfree;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bavail)) {
			bavail = vfa.f_bavail;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_bused)) {
			bused = vfa.f_bused;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_files)) {
			files = vfa.f_files;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_ffree)) {
			ffree = vfa.f_ffree;
		}
		if (VFSATTR_IS_SUPPORTED(&vfa, f_capabilities)) {
			if ((vfa.f_capabilities.capabilities[VOL_CAPABILITIES_FORMAT] &
			     (VOL_CAP_FMT_CASE_SENSITIVE)) &&
			    (vfa.f_capabilities.valid[VOL_CAPABILITIES_FORMAT] &
			     (VOL_CAP_FMT_CASE_SENSITIVE))) {
				xmp->nullm_flags &= ~NULLM_CASEINSENSITIVE;
			}
		}
	} else {
		goto error;
	}

	sp->f_bsize  = bsize;
	sp->f_iosize = iosize;
	sp->f_blocks = blocks;
	sp->f_bfree  = bfree;
	sp->f_bavail = bavail;
	sp->f_bused  = bused;
	sp->f_files  = files;
	sp->f_ffree  = ffree;

	/* Associate the mac label information from the mirrored filesystem
	 * with the mirror */
	MAC_PERFORM(mount_label_associate, cred, vnode_mount(lowerrootvp),
	    vfs_mntlabel(mp));

	NULLFSDEBUG("nullfs_mount: lower %s, alias at %s\n", sp->f_mntfromname,
	    sp->f_mntonname);
	return (0);

error:
	if (xmp) {
		FREE(xmp, M_TEMP);
	}
	if (lowerrootvp) {
		vnode_getwithref(lowerrootvp);
		vnode_rele(lowerrootvp);
		vnode_put(lowerrootvp);
	}
	if (vp) {
		/* we made the root vnode but the mount failed, so clean it up */
		vnode_getwithref(vp);
		vnode_rele(vp);
		/* give vp back */
		vnode_recycle(vp);
		vnode_put(vp);
	}
	return error;
}
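/*
 * nullfs_mount performs the iocount-to-usecount conversion twice: once
 * for the lower root vnode after vnode_lookup() and once for its own
 * root vnode after null_getnewvnode().  A sketch of that conversion as
 * a helper (iocount_to_usecount is a hypothetical name; vnode_ref() and
 * vnode_put() are the KPIs used above):
 */
static int
iocount_to_usecount(struct vnode *vp)
{
	int error = vnode_ref(vp);	/* take the long-term reference */
	vnode_put(vp);			/* drop the transient iocount */
	return error;			/* nonzero: caller must not use vp */
}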
/*
 * Unlink zp from dl, and mark zp for deletion if this was the last link.
 * Can fail if zp is a mount point (EBUSY) or a non-empty directory (EEXIST).
 * If 'unlinkedp' is NULL, we put unlinked znodes on the unlinked list.
 * If it's non-NULL, we use it to indicate whether the znode needs deletion,
 * and it's the caller's job to do it.
 */
int
zfs_link_destroy(zfs_dirlock_t *dl, znode_t *zp, dmu_tx_t *tx, int flag,
    boolean_t *unlinkedp)
{
	znode_t *dzp = dl->dl_dzp;
	zfsvfs_t *zfsvfs = dzp->z_zfsvfs;
	vnode_t *vp = ZTOV(zp);
#ifdef __APPLE__
	//int zp_is_dir = S_ISDIR(zp->z_phys->zp_mode);
	int zp_is_dir = S_ISDIR(zp->z_mode);
#else
	int zp_is_dir = (vp->v_type == VDIR);
#endif
	boolean_t unlinked = B_FALSE;
	sa_bulk_attr_t bulk[5];
	uint64_t mtime[2], ctime[2];
	int count = 0;
	int error;

#ifndef __APPLE__
	dnlc_remove(ZTOV(dzp), dl->dl_name);
#endif

	if (!(flag & ZRENAMING)) {
		if (vp) {
			if (vn_vfswlock(vp))	/* prevent new mounts on zp */
				return (EBUSY);

			if (vn_ismntpt(vp)) {	/* don't remove mount point */
				vn_vfsunlock(vp);
				return (EBUSY);
			}
		} /* if (vp) */

		mutex_enter(&zp->z_lock);

		if (zp_is_dir && !zfs_dirempty(zp)) {
			/* dir not empty */
			mutex_exit(&zp->z_lock);
#ifdef __APPLE__
			return (ENOTEMPTY);
#else
			vn_vfsunlock(vp);
			return (EEXIST);
#endif
		}

		/*
		 * If we get here, we are going to try to remove the object.
		 * First try removing the name from the directory; if that
		 * fails, return the error.
		 */
		error = zfs_dropname(dl, zp, dzp, tx, flag);
		if (error != 0) {
			mutex_exit(&zp->z_lock);
			//vn_vfsunlock(vp);
			return (error);
		}

		if (zp->z_links <= zp_is_dir) {
#ifndef __APPLE__
			zfs_panic_recover("zfs: link count on %s is %u, "
			    "should be at least %u",
			    zp->z_vnode->v_path ? zp->z_vnode->v_path :
			    "<unknown>", (int)zp->z_links, zp_is_dir + 1);
#endif
			zp->z_links = zp_is_dir + 1;
		}
		if (--zp->z_links == zp_is_dir) {
			zp->z_unlinked = B_TRUE;
			zp->z_links = 0;
			unlinked = B_TRUE;
		} else {
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
			    NULL, &ctime, sizeof (ctime));
			SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
			    NULL, &zp->z_pflags, sizeof (zp->z_pflags));
			zfs_tstamp_update_setup(zp, STATE_CHANGED, mtime,
			    ctime, B_TRUE);
		}
		SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
		    NULL, &zp->z_links, sizeof (zp->z_links));
		error = sa_bulk_update(zp->z_sa_hdl, bulk, count, tx);
		count = 0;
		ASSERT(error == 0);
		mutex_exit(&zp->z_lock);
#ifndef __APPLE__
		vn_vfsunlock(vp);
#endif
	} else {
		error = zfs_dropname(dl, zp, dzp, tx, flag);
		if (error != 0)
			return (error);
	}

	mutex_enter(&dzp->z_lock);
	dzp->z_size--;			/* one dirent removed */
	dzp->z_links -= zp_is_dir;	/* ".." link from zp */
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_LINKS(zfsvfs),
	    NULL, &dzp->z_links, sizeof (dzp->z_links));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_SIZE(zfsvfs),
	    NULL, &dzp->z_size, sizeof (dzp->z_size));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_CTIME(zfsvfs),
	    NULL, ctime, sizeof (ctime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_MTIME(zfsvfs),
	    NULL, mtime, sizeof (mtime));
	SA_ADD_BULK_ATTR(bulk, count, SA_ZPL_FLAGS(zfsvfs),
	    NULL, &dzp->z_pflags, sizeof (dzp->z_pflags));
	zfs_tstamp_update_setup(dzp, CONTENT_MODIFIED, mtime, ctime, B_TRUE);
	error = sa_bulk_update(dzp->z_sa_hdl, bulk, count, tx);
	ASSERT(error == 0);
	mutex_exit(&dzp->z_lock);

	/* Inform VFS to dump this znode as soon as possible... */
	if (unlinked && ZTOV(zp))
		vnode_recycle(ZTOV(zp));

	//error = zap_remove(zp->z_zfsvfs->z_os, dzp->z_id, dl->dl_name, tx);
	//ASSERT(error == 0);

	if (unlinkedp != NULL)
		*unlinkedp = unlinked;
	else if (unlinked)
		zfs_unlinked_add(zp, tx);

	return (0);
}
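/*
 * The link-count arithmetic above hinges on zp_is_dir being 0 for files
 * and 1 for directories: a directory's count includes its "." self-link,
 * so that value is also the floor at which the object is fully unlinked.
 * A sketch of the decrement logic in isolation (drop_link is a
 * hypothetical helper; boolean_t/B_TRUE/B_FALSE as in the ZFS sources):
 */
static boolean_t
drop_link(uint64_t *links, int zp_is_dir)
{
	if (*links <= (uint64_t)zp_is_dir)	/* clamp a corrupt count */
		*links = zp_is_dir + 1;
	if (--(*links) == (uint64_t)zp_is_dir) {
		*links = 0;			/* last link gone: unlinked */
		return (B_TRUE);
	}
	return (B_FALSE);			/* links remain; update times */
}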
static int
ClearCallBack(struct rx_connection *a_conn, struct AFSFid *a_fid)
{
	struct vcache *tvc;
	int i;
	struct VenusFid localFid;
	struct volume *tv;
#ifdef AFS_DARWIN80_ENV
	vnode_t vp;
#endif

	AFS_STATCNT(ClearCallBack);

	AFS_ASSERT_GLOCK();

	/*
	 * XXXX Don't hold any server locks here because of callback protocol XXX
	 */
	localFid.Cell = 0;
	localFid.Fid.Volume = a_fid->Volume;
	localFid.Fid.Vnode = a_fid->Vnode;
	localFid.Fid.Unique = a_fid->Unique;

	/*
	 * Volume ID of zero means don't do anything.
	 */
	if (a_fid->Volume != 0) {
		if (a_fid->Vnode == 0) {
			struct afs_q *tq, *uq;
			/*
			 * Clear callback for the whole volume.  Zip through the
			 * hash chain, nullifying entries whose volume ID matches.
			 */
		loop1:
			ObtainReadLock(&afs_xvcache);
			i = VCHashV(&localFid);
			for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
				uq = QPrev(tq);
				tvc = QTOVH(tq);
				if (tvc->f.fid.Fid.Volume == a_fid->Volume) {
					tvc->callback = NULL;
					if (!localFid.Cell)
						localFid.Cell = tvc->f.fid.Cell;
					tvc->dchint = NULL;	/* invalidate hints */
					if (tvc->f.states & CVInit) {
						ReleaseReadLock(&afs_xvcache);
						afs_osi_Sleep(&tvc->f.states);
						goto loop1;
					}
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_HPUX_ENV) || defined(AFS_LINUX20_ENV)
					AFS_FAST_HOLD(tvc);
#else
#ifdef AFS_DARWIN80_ENV
					if (tvc->f.states & CDeadVnode) {
						if (!(tvc->f.states & CBulkFetching)) {
							ReleaseReadLock(&afs_xvcache);
							afs_osi_Sleep(&tvc->f.states);
							goto loop1;
						}
					}
					vp = AFSTOV(tvc);
					if (vnode_get(vp))
						continue;
					if (vnode_ref(vp)) {
						AFS_GUNLOCK();
						vnode_put(vp);
						AFS_GLOCK();
						continue;
					}
					if (tvc->f.states & (CBulkFetching | CDeadVnode)) {
						AFS_GUNLOCK();
						vnode_recycle(AFSTOV(tvc));
						AFS_GLOCK();
					}
#else
					AFS_FAST_HOLD(tvc);
#endif
#endif
					ReleaseReadLock(&afs_xvcache);
					ObtainWriteLock(&afs_xcbhash, 449);
					afs_DequeueCallback(tvc);
					tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
					afs_allCBs++;
					if (tvc->f.fid.Fid.Vnode & 1)
						afs_oddCBs++;
					else
						afs_evenCBs++;
					ReleaseWriteLock(&afs_xcbhash);
					if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
						osi_dnlc_purgedp(tvc);
					afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
						   ICL_TYPE_POINTER, tvc,
						   ICL_TYPE_INT32, tvc->f.states,
						   ICL_TYPE_INT32, a_fid->Volume);
#ifdef AFS_DARWIN80_ENV
					vnode_put(AFSTOV(tvc));
#endif
					ObtainReadLock(&afs_xvcache);
					uq = QPrev(tq);
					AFS_FAST_RELE(tvc);
				} else if ((tvc->f.states & CMValid)
					   && (tvc->mvid->Fid.Volume == a_fid->Volume)) {
					tvc->f.states &= ~CMValid;
					if (!localFid.Cell)
						localFid.Cell = tvc->mvid->Cell;
				}
			}
			ReleaseReadLock(&afs_xvcache);

			/*
			 * XXXX Don't hold any locks here XXXX
			 */
			tv = afs_FindVolume(&localFid, 0);
			if (tv) {
				afs_ResetVolumeInfo(tv);
				afs_PutVolume(tv, 0);
				/* invalidate mtpoint? */
			}
		} /*Clear callbacks for whole volume */
		else {
			/*
			 * Clear callbacks just for the one file.
			 */
			struct vcache *uvc;
			afs_allCBs++;
			if (a_fid->Vnode & 1)
				afs_oddCBs++;	/*Could do this on volume basis, too */
			else
				afs_evenCBs++;	/*A particular fid was specified */
		loop2:
			ObtainReadLock(&afs_xvcache);
			i = VCHash(&localFid);
			for (tvc = afs_vhashT[i]; tvc; tvc = uvc) {
				uvc = tvc->hnext;
				if (tvc->f.fid.Fid.Vnode == a_fid->Vnode
				    && tvc->f.fid.Fid.Volume == a_fid->Volume
				    && tvc->f.fid.Fid.Unique == a_fid->Unique) {
					tvc->callback = NULL;
					tvc->dchint = NULL;	/* invalidate hints */
					if (tvc->f.states & CVInit) {
						ReleaseReadLock(&afs_xvcache);
						afs_osi_Sleep(&tvc->f.states);
						goto loop2;
					}
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_HPUX_ENV) || defined(AFS_LINUX20_ENV)
					AFS_FAST_HOLD(tvc);
#else
#ifdef AFS_DARWIN80_ENV
					if (tvc->f.states & CDeadVnode) {
						if (!(tvc->f.states & CBulkFetching)) {
							ReleaseReadLock(&afs_xvcache);
							afs_osi_Sleep(&tvc->f.states);
							goto loop2;
						}
					}
					vp = AFSTOV(tvc);
					if (vnode_get(vp))
						continue;
					if (vnode_ref(vp)) {
						AFS_GUNLOCK();
						vnode_put(vp);
						AFS_GLOCK();
						continue;
					}
					if (tvc->f.states & (CBulkFetching | CDeadVnode)) {
						AFS_GUNLOCK();
						vnode_recycle(AFSTOV(tvc));
						AFS_GLOCK();
					}
#else
					AFS_FAST_HOLD(tvc);
#endif
#endif
					ReleaseReadLock(&afs_xvcache);
					ObtainWriteLock(&afs_xcbhash, 450);
					afs_DequeueCallback(tvc);
					tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
					ReleaseWriteLock(&afs_xcbhash);
					if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
						osi_dnlc_purgedp(tvc);
					afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
						   ICL_TYPE_POINTER, tvc,
						   ICL_TYPE_INT32, tvc->f.states,
						   ICL_TYPE_LONG, 0);
#ifdef CBDEBUG
					lastCallBack_vnode = afid->Vnode;
					lastCallBack_dv = tvc->mstat.DataVersion.low;
					osi_GetuTime(&lastCallBack_time);
#endif /* CBDEBUG */
#ifdef AFS_DARWIN80_ENV
					vnode_put(AFSTOV(tvc));
#endif
					ObtainReadLock(&afs_xvcache);
					uvc = tvc->hnext;
					AFS_FAST_RELE(tvc);
				}
			}	/*Walk through hash table */
			ReleaseReadLock(&afs_xvcache);
		}		/*Clear callbacks for one file */
	}			/*Fid has non-zero volume ID */

	/*
	 * Always return a predictable value.
	 */
	return (0);
}				/*ClearCallBack */
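/*
 * Both hash walks above share a restart-on-sleep shape: the read lock on
 * afs_xvcache cannot be held across afs_osi_Sleep(), so when an entry is
 * still initializing (CVInit) the walk drops the lock, waits, and
 * restarts from the top of the chain.  A skeleton of the pattern
 * (walk_vcache_chain and the process_entry placeholder are hypothetical;
 * the locks, macros, and flags are the ones used above):
 */
static void
walk_vcache_chain(struct VenusFid *afid)
{
	struct vcache *tvc, *next;
	int i;

restart:
	ObtainReadLock(&afs_xvcache);
	i = VCHash(afid);
	for (tvc = afs_vhashT[i]; tvc; tvc = next) {
		next = tvc->hnext;
		if (tvc->f.states & CVInit) {
			/* cannot sleep while holding the lock */
			ReleaseReadLock(&afs_xvcache);
			afs_osi_Sleep(&tvc->f.states);
			goto restart;
		}
		/* process_entry(tvc); */
	}
	ReleaseReadLock(&afs_xvcache);
}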