/* remove everything referencing a specific volume; note that the current
 * implementation falls back to a full cache purge, so fidp is unused */
int
osi_dnlc_purgevol(struct VenusFid *fidp)
{
    dnlcstats.purgevols++;
    osi_dnlc_purge();

    return 0;
}
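/*
 * A genuinely per-volume purge would have to walk every hash chain and evict
 * only entries whose cached vcache lives in fidp's volume.  A hypothetical
 * sketch follows; RemoveEntry(), ncfreelist, and the lock rank number are
 * assumed from context here, not confirmed:
 */
static void
osi_dnlc_purgevol_sketch(struct VenusFid *fidp)
{
    int i, again;
    struct nc *tnc;

    ObtainWriteLock(&afs_xdnlc, 500);	/* lock rank number is illustrative */
    for (i = 0; i < NHSIZE; i++) {
	do {
	    again = 0;
	    for (tnc = nameHash[i]; tnc; tnc = tnc->next) {
		if (tnc->vp
		    && tnc->vp->f.fid.Fid.Volume == fidp->Fid.Volume
		    && tnc->vp->f.fid.Cell == fidp->Cell) {
		    RemoveEntry(tnc, i);	/* unlink from circular chain */
		    tnc->next = ncfreelist;	/* assumed freelist convention */
		    ncfreelist = tnc;
		    again = 1;		/* chain links changed; rescan */
		    break;
		}
		if (tnc->next == nameHash[i])
		    break;	/* circular list: back at the head */
	    }
	} while (again);
    }
    ReleaseWriteLock(&afs_xdnlc);
}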
/**
 * Reset volume name to volume id mapping cache.
 * @param flags bitmask of AFS_VOLCHECK_* values selecting which checks to run
 */
void
afs_CheckVolumeNames(int flags)
{
    afs_int32 i, j;
    struct volume *tv;
    unsigned int now;
    struct vcache *tvc;
    afs_int32 *volumeID, *cellID, vsize, nvols;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif
    AFS_STATCNT(afs_CheckVolumeNames);

    nvols = 0;
    volumeID = cellID = NULL;
    vsize = 0;
    ObtainReadLock(&afs_xvolume);
    if (flags & AFS_VOLCHECK_EXPIRED) {
	/*
	 * allocate space to hold the volumeIDs and cellIDs, only if
	 * we will be invalidating the mountpoints later on
	 */
	for (i = 0; i < NVOLS; i++)
	    for (tv = afs_volumes[i]; tv; tv = tv->next)
		++vsize;

	volumeID = afs_osi_Alloc(2 * vsize * sizeof(*volumeID));
	cellID = (volumeID) ? volumeID + vsize : NULL;
    }

    now = osi_Time();
    for (i = 0; i < NVOLS; i++) {
	for (tv = afs_volumes[i]; tv; tv = tv->next) {
	    if (flags & AFS_VOLCHECK_EXPIRED) {
		if (((tv->expireTime < (now + 10)) && (tv->states & VRO))
		    || (flags & AFS_VOLCHECK_FORCE)) {
		    afs_ResetVolumeInfo(tv);	/* also resets status */
		    if (volumeID) {
			volumeID[nvols] = tv->volume;
			cellID[nvols] = tv->cell;
		    }
		    ++nvols;
		    continue;
		}
	    }
	    /* ??? */
	    if (flags & (AFS_VOLCHECK_BUSY | AFS_VOLCHECK_FORCE)) {
		for (j = 0; j < AFS_MAXHOSTS; j++)
		    tv->status[j] = not_busy;
	    }
	}
    }
    ReleaseReadLock(&afs_xvolume);

    /* next ensure all mt points are re-evaluated */
    if (nvols || (flags & (AFS_VOLCHECK_FORCE | AFS_VOLCHECK_MTPTS))) {
      loop:
	ObtainReadLock(&afs_xvcache);
	for (i = 0; i < VCSIZE; i++) {
	    for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {

		/* if the volume of "mvid" of the vcache entry is among the
		 * ones we found earlier, then we re-evaluate it.  Also, if
		 * the force bit is set or we explicitly asked to reevaluate
		 * the mt-pts, we clean the cmvalid bit */
		if ((flags & (AFS_VOLCHECK_FORCE | AFS_VOLCHECK_MTPTS))
		    || (tvc->mvid
			&& inVolList(tvc->mvid, nvols, volumeID, cellID)))
		    tvc->f.states &= ~CMValid;

		/* If the volume that this file belongs to was reset earlier,
		 * then we should remove its callback.
		 * Again, if forced, always do it.
		 */
		if ((tvc->f.states & CRO)
		    && (inVolList(&tvc->f.fid, nvols, volumeID, cellID)
			|| (flags & AFS_VOLCHECK_FORCE))) {

		    if (tvc->f.states & CVInit) {
			ReleaseReadLock(&afs_xvcache);
			afs_osi_Sleep(&tvc->f.states);
			goto loop;
		    }
#ifdef AFS_DARWIN80_ENV
		    if (tvc->f.states & CDeadVnode) {
			ReleaseReadLock(&afs_xvcache);
			afs_osi_Sleep(&tvc->f.states);
			goto loop;
		    }
		    tvp = AFSTOV(tvc);
		    if (vnode_get(tvp))
			continue;
		    if (vnode_ref(tvp)) {
			AFS_GUNLOCK();
			/* AFSTOV(tvc) may be NULL */
			vnode_put(tvp);
			AFS_GLOCK();
			continue;
		    }
#else
		    AFS_FAST_HOLD(tvc);
#endif
		    ReleaseReadLock(&afs_xvcache);

		    ObtainWriteLock(&afs_xcbhash, 485);
		    /* LOCKXXX: We aren't holding tvc write lock? */
		    afs_DequeueCallback(tvc);
		    tvc->f.states &= ~CStatd;
		    ReleaseWriteLock(&afs_xcbhash);
		    if (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR))
			osi_dnlc_purgedp(tvc);

#ifdef AFS_DARWIN80_ENV
		    vnode_put(AFSTOV(tvc));
		    /* our tvc ptr is still good until now */
		    AFS_FAST_RELE(tvc);
		    ObtainReadLock(&afs_xvcache);
#else
		    ObtainReadLock(&afs_xvcache);

		    /* our tvc ptr is still good until now */
		    AFS_FAST_RELE(tvc);
#endif
		}
	    }
	}
	osi_dnlc_purge();	/* definitely overkill, but it's safer this way */
	ReleaseReadLock(&afs_xvcache);
    }

    if (volumeID)
	afs_osi_Free(volumeID, 2 * vsize * sizeof(*volumeID));

}				/*afs_CheckVolumeNames */
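/*
 * inVolList() is used above but not shown here.  A minimal sketch consistent
 * with how afs_CheckVolumeNames builds the parallel volumeID/cellID arrays;
 * note that when the allocation failed the caller passes NULL arrays, so a
 * conservative implementation should treat every fid as affected:
 */
static int
inVolList(struct VenusFid *fid, afs_int32 nvols, afs_int32 *vID,
	  afs_int32 *cID)
{
    afs_int32 i;

    /* allocation failed earlier: err on the side of "yes, it matches" */
    if (nvols && (!vID || !cID))
	return 1;

    for (i = 0; i < nvols; i++) {
	if (fid->Fid.Volume == vID[i] && fid->Cell == cID[i])
	    return 1;
    }
    return 0;
}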
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    if (!afs_usednlc)
	return 0;

    dnlcHash(ts, key);		/* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE)
	return 0;
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);
    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
	 tnc = tnc->next, safety++) {
	if ( /* (tnc->key == key) && */ (tnc->dirp == adp)
	    && (!strcmp((char *)tnc->name, aname))) {
	    tvc = tnc->vp;
	    break;
	} else if (tnc->next == nameHash[skey]) {	/* end of list */
	    break;
	} else if (safety > NCSIZE) {
	    afs_warn("DNLC cycle");
	    dnlcstats.cycles++;
	    ReleaseReadLock(&afs_xdnlc);
	    ReleaseReadLock(&afs_xvcache);
	    osi_dnlc_purge();
	    return 0;
	}
    }

    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
	ReleaseReadLock(&afs_xvcache);
	dnlcstats.misses++;
    } else {
	if ((tvc->f.states & CVInit)
#ifdef AFS_DARWIN80_ENV
	    || (tvc->f.states & CDeadVnode)
#endif
	    ) {
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    return 0;
	}
#if defined(AFS_DARWIN80_ENV)
	tvp = AFSTOV(tvc);
	if (vnode_get(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    return 0;
	}
	if (vnode_ref(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    AFS_GUNLOCK();
	    vnode_put(tvp);
	    AFS_GLOCK();
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    return 0;
	}
#else
	osi_vnhold(tvc, 0);
#endif
	ReleaseReadLock(&afs_xvcache);
    }

    return tvc;
}
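/*
 * dnlcHash() above is a macro, not a function: it accumulates a hash over
 * the name while advancing ts to the terminating NUL, which is why the
 * caller can compute the name length as ts - aname afterwards.  A sketch of
 * the expected shape (the multiplier is illustrative; the contract is the
 * hash-plus-advance side effect):
 */
#define dnlcHash(ts, hval)			\
    for ((hval) = 0; *(ts); (ts)++) {		\
	(hval) *= 173;				\
	(hval) += *(ts);			\
    }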
int
osi_dnlc_enter(struct vcache *adp, char *aname, struct vcache *avc,
	       afs_hyper_t *avno)
{
    struct nc *tnc;
    unsigned int key, skey;
    char *ts = aname;
    int safety;

    if (!afs_usednlc)
	return 0;

    TRACE(osi_dnlc_enterT, 0);
    dnlcHash(ts, key);		/* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE)
	return 0;
    skey = key & (NHSIZE - 1);
    dnlcstats.enters++;

  retry:
    ObtainWriteLock(&afs_xdnlc, 222);

    /* Only cache entries from the latest version of the directory */
    if (!(adp->f.states & CStatd) || !hsame(*avno, adp->f.m.DataVersion)) {
	ReleaseWriteLock(&afs_xdnlc);
	return 0;
    }

    /*
     * Make sure each directory entry gets cached no more than once.
     */
    for (tnc = nameHash[skey], safety = 0; tnc; tnc = tnc->next, safety++) {
	if ((tnc->dirp == adp) && (!strcmp((char *)tnc->name, aname))) {
	    /* duplicate entry */
	    break;
	} else if (tnc->next == nameHash[skey]) {	/* end of list */
	    tnc = NULL;
	    break;
	} else if (safety > NCSIZE) {
	    afs_warn("DNLC cycle");
	    dnlcstats.cycles++;
	    ReleaseWriteLock(&afs_xdnlc);
	    osi_dnlc_purge();
	    goto retry;
	}
    }

    if (tnc == NULL) {
	tnc = GetMeAnEntry();

	tnc->dirp = adp;
	tnc->vp = avc;
	tnc->key = key;
	memcpy((char *)tnc->name, aname, ts - aname + 1);	/* include the NULL */

	InsertEntry(tnc);
    } else {
	/* duplicate */
	tnc->vp = avc;
    }
    ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
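/*
 * A typical caller pattern, sketched as a hypothetical helper.  The point is
 * that the data-version snapshot must be taken *before* the directory is
 * searched, so the hsame() check in osi_dnlc_enter rejects names resolved
 * against a directory image that has since changed:
 */
static void
cache_lookup_result_sketch(struct vcache *adp, char *aname, struct vcache *avc)
{
    afs_hyper_t dv;

    hset(dv, adp->f.m.DataVersion);	/* snapshot the dir version first */
    /* ... perform the real directory search, yielding avc ... */
    osi_dnlc_enter(adp, aname, avc, &dv);	/* cache only if still current */
}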
/* Variant of osi_dnlc_lookup with FreeBSD critical-section handling and the
 * (disabled) LRU promotion code. */
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    int LRUme;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc, *tnc1 = 0;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    ma_critical_enter();

    if (!afs_usednlc) {
	ma_critical_exit();
	return 0;
    }

    dnlcHash(ts, key);		/* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
	ma_critical_exit();
	return 0;
    }
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);
    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
	 tnc = tnc->next, safety++) {
	if ( /* (tnc->key == key) && */ (tnc->dirp == adp)
	    && (!strcmp((char *)tnc->name, aname))) {
	    tvc = tnc->vp;
	    tnc1 = tnc;
	    break;
	} else if (tnc->next == nameHash[skey]) {	/* end of list */
	    break;
	} else if (safety > NCSIZE) {
	    afs_warn("DNLC cycle");
	    dnlcstats.cycles++;
	    ReleaseReadLock(&afs_xdnlc);
	    ReleaseReadLock(&afs_xvcache);
	    osi_dnlc_purge();
	    ma_critical_exit();
	    return 0;
	}
    }

    LRUme = 0;			/* (tnc != nameHash[skey]); */
    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
	ReleaseReadLock(&afs_xvcache);
	dnlcstats.misses++;
    } else {
	if ((tvc->f.states & CVInit)
#ifdef AFS_DARWIN80_ENV
	    || (tvc->f.states & CDeadVnode)
#endif
	    ) {
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    ma_critical_exit();
	    return 0;
	}
#if defined(AFS_DARWIN80_ENV)
	tvp = AFSTOV(tvc);
	if (vnode_get(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    ma_critical_exit();
	    return 0;
	}
	if (vnode_ref(tvp)) {
	    ReleaseReadLock(&afs_xvcache);
	    AFS_GUNLOCK();
	    vnode_put(tvp);
	    AFS_GLOCK();
	    dnlcstats.misses++;
	    osi_dnlc_remove(adp, aname, tvc);
	    ma_critical_exit();
	    return 0;
	}
#elif defined(AFS_FBSD_ENV)
	/* can't sleep in a critical section */
	ma_critical_exit();
	osi_vnhold(tvc, 0);
	ma_critical_enter();
#else
	osi_vnhold(tvc, 0);
#endif
	ReleaseReadLock(&afs_xvcache);

#ifdef notdef
	/*
	 * XX If LRUme ever is non-zero change the if statement around because
	 * aix's cc with optimizer on won't necessarily check things in order XX
	 */
	if (LRUme && (0 == NBObtainWriteLock(&afs_xdnlc))) {
	    /* don't block to do this */
	    /* tnc might have been moved during race condition, */
	    /* but it's always in a legit hash chain when a lock is granted,
	     * or else it's on the freelist so prev == NULL,
	     * so at worst this is redundant */
	    /* Now that we've got it held, and a lock on the dnlc, we
	     * should check to be sure that there was no race, and
	     * bail out if there was. */
	    if (tnc->prev) {
		/* special case for only two elements on list - relative
		 * ordering doesn't change */
		if (tnc->prev != tnc->next) {
		    /* remove from old location */
		    tnc->prev->next = tnc->next;
		    tnc->next->prev = tnc->prev;
		    /* insert into new location */
		    tnc->next = nameHash[skey];
		    tnc->prev = tnc->next->prev;

		    tnc->next->prev = tnc;
		    tnc->prev->next = tnc;
		}
		nameHash[skey] = tnc;
	    }
	    ReleaseWriteLock(&afs_xdnlc);
	}
#endif
    }

    ma_critical_exit();
    return tvc;
}
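/*
 * Both lookup variants detect "end of chain" with tnc->next == nameHash[skey].
 * That test only works because each chain is a circular doubly-linked list
 * whose head is the most recently inserted entry.  A sketch of an insert that
 * maintains this invariant (the real InsertEntry() may differ in detail):
 */
static void
InsertEntry_sketch(struct nc *tnc)
{
    unsigned int key = tnc->key & (NHSIZE - 1);

    if (!nameHash[key]) {
	/* first entry: a one-element circle */
	tnc->next = tnc->prev = tnc;
    } else {
	/* splice in just before the current head... */
	tnc->next = nameHash[key];
	tnc->prev = nameHash[key]->prev;
	tnc->next->prev = tnc;
	tnc->prev->next = tnc;
    }
    nameHash[key] = tnc;	/* ...and make the new entry the head */
}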
int
SRXAFSCB_InitCallBackState(struct rx_call *a_call)
{
    int i;
    struct vcache *tvc;
    struct rx_connection *tconn;
    struct rx_peer *peer;
    struct server *ts;
    int code = 0;
    XSTATS_DECLS;

    RX_AFS_GLOCK();
    XSTATS_START_CMTIME(AFS_STATS_CM_RPCIDX_INITCALLBACKSTATE);
    AFS_STATCNT(SRXAFSCB_InitCallBackState);

    /*
     * Find the address of the host making this call
     */
    if ((tconn = rx_ConnectionOf(a_call)) && (peer = rx_PeerOf(tconn))) {

	afs_allCBs++;
	afs_oddCBs++;		/*Including any missed via create race */
	afs_evenCBs++;		/*Including any missed via create race */

	ts = afs_FindServer(rx_HostOf(peer), rx_PortOf(peer), (afsUUID *) 0,
			    0);
	if (ts) {
	    for (i = 0; i < VCSIZE; i++)
		for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
		    if (tvc->callback == ts) {
			ObtainWriteLock(&afs_xcbhash, 451);
			afs_DequeueCallback(tvc);
			tvc->callback = NULL;
			tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
			ReleaseWriteLock(&afs_xcbhash);
		    }
		}

	    /* capabilities need be requested again */
	    ObtainWriteLock(&afs_xserver, 877);
	    ts->flags &= ~SCAPS_KNOWN;
	    ReleaseWriteLock(&afs_xserver);
	}

	/* find any volumes residing on this server and flush their state */
	{
	    struct volume *tv;
	    int j;

	    for (i = 0; i < NVOLS; i++)
		for (tv = afs_volumes[i]; tv; tv = tv->next) {
		    for (j = 0; j < AFS_MAXHOSTS; j++)
			if (tv->serverHost[j] == ts)
			    afs_ResetVolumeInfo(tv);
		}
	}
	osi_dnlc_purge();	/* may be a little bit extreme */
    }

    XSTATS_END_TIME;
    RX_AFS_GUNLOCK();
    return 0;
}				/*SRXAFSCB_InitCallBackState */
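/*
 * Note that afs_FindServer() above can return NULL, in which case the volume
 * scan compares tv->serverHost[j] against a NULL pointer and could match
 * unused slots.  A defensive variant would skip the scan entirely when the
 * server is unknown; this is a suggested sketch, not the upstream fix, and
 * the afs_xvolume locking shown is an assumption about what the caller needs:
 */
static void
flush_volumes_on_server_sketch(struct server *ts)
{
    struct volume *tv;
    int i, j;

    if (!ts)			/* unknown host: nothing to match against */
	return;

    ObtainReadLock(&afs_xvolume);
    for (i = 0; i < NVOLS; i++)
	for (tv = afs_volumes[i]; tv; tv = tv->next)
	    for (j = 0; j < AFS_MAXHOSTS; j++)
		if (tv->serverHost[j] == ts)
		    afs_ResetVolumeInfo(tv);
    ReleaseReadLock(&afs_xvolume);
}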