/*!
 * Remove a single name from the cache.
 *
 * \param adp vcache entry for the directory containing the name.
 * \param aname the name to remove.
 * \param avc vcache entry the name maps to (not consulted by this routine).
 * \return 0
 */
int
osi_dnlc_remove(struct vcache *adp, char *aname, struct vcache *avc)
{
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc;

    if (!afs_usednlc)
        return 0;

    TRACE(osi_dnlc_removeT, skey);
    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
        return 0;
    }
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_removeT, skey);
    dnlcstats.removes++;

    ObtainReadLock(&afs_xdnlc);
    for (tnc = nameHash[skey]; tnc; tnc = tnc->next) {
        if ((tnc->dirp == adp) && (tnc->key == key)
            && (!strcmp((char *)tnc->name, aname))) {
            tnc->dirp = NULL;   /* now it won't match anything */
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            tnc = NULL;
            break;
        }
    }
    ReleaseReadLock(&afs_xdnlc);

    if (!tnc)
        return 0;

    /* there is a little race condition here, but it's relatively
     * harmless.  At worst, I wind up removing a mapping that I just
     * created. */
    if (EWOULDBLOCK == NBObtainWriteLock(&afs_xdnlc, 1)) {
        return 0;               /* no big deal, tnc will get recycled eventually */
    }
    RemoveEntry(tnc, skey);
    tnc->next = ncfreelist;
    ncfreelist = tnc;
    ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
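/*
 * For context: a minimal sketch of the kind of string hash the dnlcHash()
 * macro used above is assumed to be.  It accumulates over the characters and
 * leaves the cursor (ts) on the terminating NUL, which is what lets callers
 * bound the name length with (ts - aname) afterwards.  The name
 * sketch_dnlcHash and the multiplier are illustrative assumptions, not the
 * macro actually defined by the DNLC headers.
 */
#if 0                           /* illustration only -- not built */
#define sketch_dnlcHash(ts, hval) \
    for ((hval) = 0; *(ts); (ts)++) { (hval) *= 173; (hval) += *(ts); }
#endif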
/*!
 * Remove anything pertaining to this file.
 *
 * \param avc vcache entry for the file to be purged.
 * \return 0
 */
int
osi_dnlc_purgevp(struct vcache *avc)
{
    int i;
    int writelocked;

#ifdef AFS_DARWIN_ENV
    if (!(avc->f.states & (CVInit | CVFlushed
#ifdef AFS_DARWIN80_ENV
                           | CDeadVnode
#endif
          )) && AFSTOV(avc))
        cache_purge(AFSTOV(avc));
#endif

    if (!afs_usednlc)
        return 0;

    dnlcstats.purgevs++;
    TRACE(osi_dnlc_purgevpT, 0);
    writelocked = (0 == NBObtainWriteLock(&afs_xdnlc, 3));

    for (i = 0; i < NCSIZE; i++) {
        if (nameCache[i].vp == avc) {
            nameCache[i].dirp = nameCache[i].vp = NULL;
            /* can't simply break; because of hard links -- might be two */
            /* different entries with same vnode */
            if (writelocked && nameCache[i].prev) {
                RemoveEntry(&nameCache[i], nameCache[i].key & (NHSIZE - 1));
                nameCache[i].next = ncfreelist;
                ncfreelist = &nameCache[i];
            }
        }
    }

    if (writelocked)
        ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
/* remove everything */
int
osi_dnlc_purge(void)
{
    int i;

    dnlcstats.purges++;
    TRACE(osi_dnlc_purgeT, 0);

    if (EWOULDBLOCK == NBObtainWriteLock(&afs_xdnlc, 4)) {      /* couldn't get lock */
        for (i = 0; i < NCSIZE; i++)
            nameCache[i].dirp = nameCache[i].vp = NULL;
    } else {                    /* did get the lock */
        ncfreelist = NULL;
        memset(nameCache, 0, sizeof(struct nc) * NCSIZE);
        memset(nameHash, 0, sizeof(struct nc *) * NHSIZE);
        for (i = 0; i < NCSIZE; i++) {
            nameCache[i].next = ncfreelist;
            ncfreelist = &nameCache[i];
        }
        ReleaseWriteLock(&afs_xdnlc);
    }

    return 0;
}
/*!
 * Remove anything pertaining to this directory.  I can invalidate
 * things without the lock, since I am just looking through the array,
 * but to move things off the lists or into the freelist, I need the
 * write lock.
 *
 * \param adp vcache entry for the directory to be purged.
 * \return 0
 */
int
osi_dnlc_purgedp(struct vcache *adp)
{
    int i;
    int writelocked;

#ifdef AFS_DARWIN_ENV
    if (!(adp->f.states & (CVInit | CVFlushed
#ifdef AFS_DARWIN80_ENV
                           | CDeadVnode
#endif
          )) && AFSTOV(adp))
        cache_purge(AFSTOV(adp));
#endif

    if (!afs_usednlc)
        return 0;

    dnlcstats.purgeds++;
    TRACE(osi_dnlc_purgedpT, 0);
    writelocked = (0 == NBObtainWriteLock(&afs_xdnlc, 2));

    for (i = 0; i < NCSIZE; i++) {
        if ((nameCache[i].dirp == adp) || (nameCache[i].vp == adp)) {
            nameCache[i].dirp = nameCache[i].vp = NULL;
            if (writelocked && nameCache[i].prev) {
                RemoveEntry(&nameCache[i], nameCache[i].key & (NHSIZE - 1));
                nameCache[i].next = ncfreelist;
                ncfreelist = &nameCache[i];
            }
        }
    }

    if (writelocked)
        ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
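/*
 * For context: a sketch of what the RemoveEntry() helper used by the purge
 * routines above is assumed to do -- unchain a struct nc from its circular,
 * doubly linked hash bucket, fixing the bucket head when the entry is the
 * current head, and leaving prev == NULL so the entry is recognizably off any
 * chain (the callers above test .prev before calling it).  Illustrative only;
 * the helper the DNLC actually defines may differ in detail.
 */
#if 0                           /* illustration only -- not built */
static void
sketch_RemoveEntry(struct nc *entry, unsigned int bucket)
{
    if (!entry->prev)           /* already off its chain (e.g. on the freelist) */
        return;
    if (entry->next == entry) { /* sole element: bucket becomes empty */
        nameHash[bucket] = NULL;
    } else {
        entry->prev->next = entry->next;
        entry->next->prev = entry->prev;
        if (nameHash[bucket] == entry)
            nameHash[bucket] = entry->next;
    }
    entry->prev = NULL;         /* callers read prev == NULL as "not chained" */
}
#endif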
/* Note that we don't set CDirty here, this is OK because the unlink
 * RPC is called synchronously */
int
afs_remove(OSI_VC_DECL(adp), char *aname, afs_ucred_t *acred)
{
    struct vrequest treq;
    register struct dcache *tdc;
    struct VenusFid unlinkFid;
    register afs_int32 code;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct afs_fakestat_state fakestate;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_remove);
    afs_Trace2(afs_iclSetp, CM_TRACE_REMOVE, ICL_TYPE_POINTER, adp,
               ICL_TYPE_STRING, aname);

    if ((code = afs_InitReq(&treq, acred))) {
        return code;
    }

    afs_InitFakeStat(&fakestate);
    AFS_DISCON_LOCK();
    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
        goto done;

    /* Check if this is dynroot */
    if (afs_IsDynroot(adp)) {
        code = afs_DynrootVOPRemove(adp, acred, aname);
        goto done;
    }
    if (afs_IsDynrootMount(adp)) {
        code = ENOENT;
        goto done;
    }

    if (strlen(aname) > AFSNAMEMAX) {
        code = ENAMETOOLONG;
        goto done;
    }
  tagain:
    code = afs_VerifyVCache(adp, &treq);
    tvc = NULL;
    if (code) {
        code = afs_CheckCode(code, &treq, 23);
        goto done;
    }

    /** If the volume is read-only, return error without making an RPC to the
     * fileserver */
    if (adp->f.states & CRO) {
        code = EROFS;
        goto done;
    }

    /* If we're running disconnected without logging, go no further... */
    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
        goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);  /* test for error below */
    ObtainWriteLock(&adp->lock, 142);
    if (tdc)
        ObtainSharedLock(&tdc->lock, 638);

    /*
     * Make sure that the data in the cache is current. We may have
     * received a callback while we were waiting for the write lock.
     */
    if (!(adp->f.states & CStatd)
        || (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
        ReleaseWriteLock(&adp->lock);
        if (tdc) {
            ReleaseSharedLock(&tdc->lock);
            afs_PutDCache(tdc);
        }
        goto tagain;
    }

    unlinkFid.Fid.Vnode = 0;
    if (!tvc) {
        tvc = osi_dnlc_lookup(adp, aname, WRITE_LOCK);
    }

    /* This should not be necessary since afs_lookup() has already
     * done the work. */
    if (!tvc)
        if (tdc) {
            code = afs_dir_Lookup(tdc, aname, &unlinkFid.Fid);
            if (code == 0) {
                afs_int32 cached = 0;

                unlinkFid.Cell = adp->f.fid.Cell;
                unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
                if (unlinkFid.Fid.Unique == 0) {
                    tvc = afs_LookupVCache(&unlinkFid, &treq, &cached, adp,
                                           aname);
                } else {
                    ObtainReadLock(&afs_xvcache);
                    tvc = afs_FindVCache(&unlinkFid, 0, DO_STATS);
                    ReleaseReadLock(&afs_xvcache);
                }
            }
        }

    if (AFS_IS_DISCON_RW) {
        if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
            /* Make shadow copy of parent dir. */
            afs_MakeShadowDir(adp, tdc);
        }

        /* Can't hold a dcache lock whilst we're getting a vcache one */
        if (tdc)
            ReleaseSharedLock(&tdc->lock);

        /* XXX - We're holding adp->lock still, and we've got no
         * guarantee about whether the ordering matches the lock hierarchy */
        ObtainWriteLock(&tvc->lock, 713);

        /* If we were locally created, then we don't need to do very
         * much beyond ensuring that we don't exist anymore */
        if (tvc->f.ddirty_flags & VDisconCreate) {
            afs_DisconRemoveDirty(tvc);
        } else {
            /* Add removed file vcache to dirty list. */
            afs_DisconAddDirty(tvc, VDisconRemove, 1);
        }

        adp->f.m.LinkCount--;
        ReleaseWriteLock(&tvc->lock);
        if (tdc)
            ObtainSharedLock(&tdc->lock, 714);
    }

    if (tvc && osi_Active(tvc)) {
        /* about to delete whole file, prefetch it first */
        ReleaseWriteLock(&adp->lock);
        if (tdc)
            ReleaseSharedLock(&tdc->lock);
        ObtainWriteLock(&tvc->lock, 143);
        FetchWholeEnchilada(tvc, &treq);
        ReleaseWriteLock(&tvc->lock);
        ObtainWriteLock(&adp->lock, 144);
        /* Technically I don't think we need this back, but let's hold it
         * anyway; The "got" reference should actually be sufficient. */
        if (tdc)
            ObtainSharedLock(&tdc->lock, 640);
    }

    osi_dnlc_remove(adp, aname, tvc);

    Tadp1 = adp;
#ifndef AFS_DARWIN80_ENV
    Tadpr = VREFCOUNT(adp);
#endif
    Ttvc = tvc;
    Tnam = aname;
    Tnam1 = 0;
#ifndef AFS_DARWIN80_ENV
    if (tvc)
        Ttvcr = VREFCOUNT(tvc);
#endif
#ifdef AFS_AIX_ENV
    if (tvc && VREFCOUNT_GT(tvc, 2) && tvc->opens > 0
        && !(tvc->f.states & CUnlinked)) {
#else
    if (tvc && VREFCOUNT_GT(tvc, 1) && tvc->opens > 0
        && !(tvc->f.states & CUnlinked)) {
#endif
        char *unlname = afs_newname();

        ReleaseWriteLock(&adp->lock);
        if (tdc)
            ReleaseSharedLock(&tdc->lock);
        code = afsrename(adp, aname, adp, unlname, acred, &treq);
        Tnam1 = unlname;
        if (!code) {
            struct VenusFid *oldmvid = NULL;
            if (tvc->mvid)
                oldmvid = tvc->mvid;
            tvc->mvid = (struct VenusFid *)unlname;
            if (oldmvid)
                osi_FreeSmallSpace(oldmvid);
            crhold(acred);
            if (tvc->uncred) {
                crfree(tvc->uncred);
            }
            tvc->uncred = acred;
            tvc->f.states |= CUnlinked;
            /* if rename succeeded, remove should not */
            ObtainWriteLock(&tvc->lock, 715);
            if (tvc->f.ddirty_flags & VDisconRemove) {
                tvc->f.ddirty_flags &= ~VDisconRemove;
            }
            ReleaseWriteLock(&tvc->lock);
        } else {
            osi_FreeSmallSpace(unlname);
        }
        if (tdc)
            afs_PutDCache(tdc);
        afs_PutVCache(tvc);
    } else {
        code = afsremove(adp, tdc, tvc, aname, acred, &treq);
    }
  done:
    afs_PutFakeStat(&fakestate);
#ifndef AFS_DARWIN80_ENV
    /* we can't track by thread, it's not exported in the KPI; only do this
     * on !macos */
    osi_Assert(!WriteLocked(&adp->lock) || (adp->lock.pid_writer != MyPidxx));
#endif
    AFS_DISCON_UNLOCK();
    return code;
}

/* afs_remunlink -- This tries to delete the file at the server after it has
 * been renamed when unlinked locally but now has been finally released.
 *
 * CAUTION -- may be called with avc unheld. */
int
afs_remunlink(register struct vcache *avc, register int doit)
{
    afs_ucred_t *cred;
    char *unlname;
    struct vcache *adp;
    struct vrequest treq;
    struct VenusFid dirFid;
    register struct dcache *tdc;
    afs_int32 code = 0;

    if (NBObtainWriteLock(&avc->lock, 423))
        return 0;
#if defined(AFS_DARWIN80_ENV)
    if (vnode_get(AFSTOV(avc))) {
        ReleaseWriteLock(&avc->lock);
        return 0;
    }
#endif

    if (avc->mvid && (doit || (avc->f.states & CUnlinkedDel))) {
        if ((code = afs_InitReq(&treq, avc->uncred))) {
            ReleaseWriteLock(&avc->lock);
        } else {
            /* Must bump the refCount because GetVCache may block.
             * Also clear mvid so no other thread comes here if we block. */
            unlname = (char *)avc->mvid;
            avc->mvid = NULL;
            cred = avc->uncred;
            avc->uncred = NULL;

#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
            VREF(AFSTOV(avc));
#else
            AFS_FAST_HOLD(avc);
#endif

            /* We'll only try this once. If it fails, just release the vnode.
             * Clear after doing hold so that NewVCache doesn't find us yet. */
            avc->f.states &= ~(CUnlinked | CUnlinkedDel);

            ReleaseWriteLock(&avc->lock);

            dirFid.Cell = avc->f.fid.Cell;
            dirFid.Fid.Volume = avc->f.fid.Fid.Volume;
            dirFid.Fid.Vnode = avc->f.parent.vnode;
            dirFid.Fid.Unique = avc->f.parent.unique;
            adp = afs_GetVCache(&dirFid, &treq, NULL, NULL);

            if (adp) {
                tdc = afs_FindDCache(adp, (afs_size_t) 0);
                ObtainWriteLock(&adp->lock, 159);
                if (tdc)
                    ObtainSharedLock(&tdc->lock, 639);

                /* afsremove releases the adp & tdc locks, and does vn_rele(avc) */
                code = afsremove(adp, tdc, avc, unlname, cred, &treq);
                afs_PutVCache(adp);
            } else {
                /* we failed - and won't be back to try again. */
                afs_PutVCache(avc);
            }
            osi_FreeSmallSpace(unlname);
            crfree(cred);
        }
    } else {
#if defined(AFS_DARWIN80_ENV)
        vnode_put(AFSTOV(avc));
#endif
        ReleaseWriteLock(&avc->lock);
    }
    return code;
}
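/*
 * For context: afs_remove() above handles a still-open file by renaming it to
 * a temporary name from afs_newname() and stashing that name in tvc->mvid;
 * afs_remunlink() then removes the renamed file once the vnode is finally
 * released.  The sketch below shows the kind of unique, hidden-looking name
 * generator that path is assumed to rely on.  The function name
 * sketch_newname and the ".__afs" prefix are illustrative assumptions, not
 * necessarily the generator the cache manager actually ships.
 */
#if 0                           /* illustration only -- not built */
static char *
sketch_newname(void)
{
    char *name, *sp;
    const char *prefix = ".__afs";
    afs_uint32 id = afs_random() & 0xffff;      /* small per-call discriminator */

    sp = name = osi_AllocSmallSpace(AFS_SMALLOCSIZ);
    while (*prefix)
        *sp++ = *prefix++;
    do {
        *sp++ = "0123456789abcdef"[id & 0xf];
        id >>= 4;
    } while (id);
    *sp = '\0';
    return name;                /* caller frees with osi_FreeSmallSpace() */
}
#endif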
/* works like PFlushVolumeData */
void
darwin_notify_perms(struct unixuser *auser, int event)
{
    int i;
    struct afs_q *tq, *uq = NULL;
    struct vcache *tvc, *hnext;
    int isglock = ISAFS_GLOCK();
    struct vnode *vp;
    struct vnode_attr va;
    int isctxtowner = 0;

    if (!afs_darwin_fsevents)
        return;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, 0777);
    if (event & UTokensObtained)
        VATTR_SET(&va, va_uid, auser->uid);
    else
        VATTR_SET(&va, va_uid, -2);     /* nobody */

    if (!isglock)
        AFS_GLOCK();
    if (!(vfs_context_owner == current_thread())) {
        get_vfs_context();
        isctxtowner = 1;
    }
  loop:
    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
        for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
            uq = QPrev(tq);
            tvc = QTOVH(tq);
            if (tvc->f.states & CDeadVnode) {
                /* we can afford to be best-effort */
                continue;
            }
            /* no per-file acls, so only notify on directories */
            if (!(vp = AFSTOV(tvc)) || !vnode_isdir(AFSTOV(tvc)))
                continue;
            /* dynroot object. no callbacks. anonymous ACL. just no. */
            if (afs_IsDynrootFid(&tvc->f.fid))
                continue;
            /* no fake fsevents on mount point sources. leaks refs */
            if (tvc->mvstat == 1)
                continue;
            /* if it's being reclaimed, just pass */
            if (vnode_get(vp))
                continue;
            if (vnode_ref(vp)) {
                AFS_GUNLOCK();
                vnode_put(vp);
                AFS_GLOCK();
                continue;
            }
            ReleaseReadLock(&afs_xvcache);
            /* Avoid potentially re-entering on this lock */
            if (0 == NBObtainWriteLock(&tvc->lock, 234)) {
                tvc->f.states |= CEvent;
                AFS_GUNLOCK();
                vnode_setattr(vp, &va, afs_osi_ctxtp);
                tvc->f.states &= ~CEvent;
                vnode_put(vp);
                AFS_GLOCK();
                ReleaseWriteLock(&tvc->lock);
            }
            ObtainReadLock(&afs_xvcache);
            uq = QPrev(tq);
            /* our tvc ptr is still good until now */
            AFS_FAST_RELE(tvc);
        }
    }
    ReleaseReadLock(&afs_xvcache);
    if (isctxtowner)
        put_vfs_context();
    if (!isglock)
        AFS_GUNLOCK();
}
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    int LRUme;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc, *tnc1 = 0;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    ma_critical_enter();

    if (!afs_usednlc) {
        ma_critical_exit();
        return 0;
    }

    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
        ma_critical_exit();
        return 0;
    }
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);
    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
         tnc = tnc->next, safety++) {
        if ( /* (tnc->key == key) && */ (tnc->dirp == adp)
            && (!strcmp((char *)tnc->name, aname))) {
            tvc = tnc->vp;
            tnc1 = tnc;
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            break;
        } else if (safety > NCSIZE) {
            afs_warn("DNLC cycle");
            dnlcstats.cycles++;
            ReleaseReadLock(&afs_xdnlc);
            ReleaseReadLock(&afs_xvcache);
            osi_dnlc_purge();
            ma_critical_exit();
            return (0);
        }
    }

    LRUme = 0;                  /* (tnc != nameHash[skey]); */
    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
        ReleaseReadLock(&afs_xvcache);
        dnlcstats.misses++;
    } else {
        if ((tvc->f.states & CVInit)
#ifdef AFS_DARWIN80_ENV
            || (tvc->f.states & CDeadVnode)
#endif
            ) {
            ReleaseReadLock(&afs_xvcache);
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            ma_critical_exit();
            return 0;
        }
#if defined(AFS_DARWIN80_ENV)
        tvp = AFSTOV(tvc);
        if (vnode_get(tvp)) {
            ReleaseReadLock(&afs_xvcache);
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            ma_critical_exit();
            return 0;
        }
        if (vnode_ref(tvp)) {
            ReleaseReadLock(&afs_xvcache);
            AFS_GUNLOCK();
            vnode_put(tvp);
            AFS_GLOCK();
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            ma_critical_exit();
            return 0;
        }
#elif defined(AFS_FBSD_ENV)
        /* can't sleep in a critical section */
        ma_critical_exit();
        osi_vnhold(tvc, 0);
        ma_critical_enter();
#else
        osi_vnhold(tvc, 0);
#endif
        ReleaseReadLock(&afs_xvcache);
#ifdef notdef
        /*
         * XX If LRUme ever is non-zero change the if statement around because
         * aix's cc with optimizer on won't necessarily check things in order XX
         */
        if (LRUme && (0 == NBObtainWriteLock(&afs_xdnlc))) {
            /* don't block to do this */
            /* tnc might have been moved during race condition, */
            /* but it's always in a legit hash chain when a lock is granted,
             * or else it's on the freelist so prev == NULL,
             * so at worst this is redundant */
            /* Now that we've got it held, and a lock on the dnlc, we
             * should check to be sure that there was no race, and
             * bail out if there was. */
            if (tnc->prev) {
                /* special case for only two elements on list - relative
                 * ordering doesn't change */
                if (tnc->prev != tnc->next) {
                    /* remove from old location */
                    tnc->prev->next = tnc->next;
                    tnc->next->prev = tnc->prev;
                    /* insert into new location */
                    tnc->next = nameHash[skey];
                    tnc->prev = tnc->next->prev;
                    tnc->next->prev = tnc;
                    tnc->prev->next = tnc;
                }
                nameHash[skey] = tnc;
            }
            ReleaseWriteLock(&afs_xdnlc);
        }
#endif
    }

    ma_critical_exit();
    return tvc;
}
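/*
 * For context: a sketch of how a pathname-resolution caller is assumed to use
 * osi_dnlc_lookup() as a fast path before reading the directory, mirroring
 * the order afs_remove() uses above (DNLC first, then afs_dir_Lookup against
 * the directory's dcache).  The helper name sketch_name_to_vcache is an
 * illustrative assumption, not the real afs_lookup() code.
 */
#if 0                           /* illustration only -- not built */
static struct vcache *
sketch_name_to_vcache(struct vcache *adp, char *aname)
{
    struct vcache *tvc;

    /* Fast path: a DNLC hit returns the vcache with a reference held. */
    tvc = osi_dnlc_lookup(adp, aname, WRITE_LOCK);
    if (tvc)
        return tvc;

    /*
     * Slow path: the caller would consult the directory contents here
     * (afs_dir_Lookup on the directory's dcache, then map the fid to a
     * vcache) and finally enter the result into the DNLC so that later
     * lookups hit the cache.
     */
    return NULL;
}
#endif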