int
osi_dnlc_remove(struct vcache *adp, char *aname, struct vcache *avc)
{
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc;

    if (!afs_usednlc)
        return 0;

    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
        return 0;
    }
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_removeT, skey);
    dnlcstats.removes++;

    ObtainReadLock(&afs_xdnlc);
    for (tnc = nameHash[skey]; tnc; tnc = tnc->next) {
        if ((tnc->dirp == adp) && (tnc->key == key)
            && (!strcmp((char *)tnc->name, aname))) {
            tnc->dirp = NULL;   /* now it won't match anything */
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            tnc = NULL;
            break;
        }
    }
    ReleaseReadLock(&afs_xdnlc);

    if (!tnc)
        return 0;

    /* there is a little race condition here, but it's relatively
     * harmless.  At worst, I wind up removing a mapping that I just
     * created. */
    if (EWOULDBLOCK == NBObtainWriteLock(&afs_xdnlc, 1)) {
        return 0;               /* no big deal, tnc will get recycled eventually */
    }
    RemoveEntry(tnc, skey);
    tnc->next = ncfreelist;
    ncfreelist = tnc;
    ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
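/*
 * Context sketch (not from this file): the routines here rely on the
 * dnlcHash() macro and struct nc declared in afs_osidnlc.h.  The shapes
 * below are a paraphrase for reference only and may not match a given
 * tree exactly.
 *
 *   #define dnlcHash(ts, hval) \
 *       for (hval = 0; *ts; ts++) { hval *= 173; hval += *ts; }
 *
 *   struct nc {
 *       unsigned int key;                  -- full hash of the name
 *       struct nc *next, *prev;            -- circular chain on nameHash[skey]
 *       struct vcache *dirp, *vp;          -- directory and target vcaches
 *       unsigned char name[AFSNCNAMESIZE]; -- only short names are cached
 *   };
 *
 * dnlcHash() advances ts to the terminating NUL, which is why the callers
 * can use (ts - aname) both as the length check against AFSNCNAMESIZE and
 * as the byte count for memcpy() in osi_dnlc_enter().
 */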
int
osi_dnlc_enter(struct vcache *adp, char *aname, struct vcache *avc,
               afs_hyper_t * avno)
{
    struct nc *tnc;
    unsigned int key, skey;
    char *ts = aname;
    int safety;

    if (!afs_usednlc)
        return 0;

    TRACE(osi_dnlc_enterT, 0);
    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
        return 0;
    }
    skey = key & (NHSIZE - 1);
    dnlcstats.enters++;

  retry:
    ObtainWriteLock(&afs_xdnlc, 222);

    /* Only cache entries from the latest version of the directory */
    if (!(adp->f.states & CStatd) || !hsame(*avno, adp->f.m.DataVersion)) {
        ReleaseWriteLock(&afs_xdnlc);
        return 0;
    }

    /*
     * Make sure each directory entry gets cached no more than once.
     */
    for (tnc = nameHash[skey], safety = 0; tnc; tnc = tnc->next, safety++) {
        if ((tnc->dirp == adp) && (!strcmp((char *)tnc->name, aname))) {
            /* duplicate entry */
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            tnc = NULL;
            break;
        } else if (safety > NCSIZE) {
            afs_warn("DNLC cycle");
            dnlcstats.cycles++;
            ReleaseWriteLock(&afs_xdnlc);
            osi_dnlc_purge();
            goto retry;
        }
    }

    if (tnc == NULL) {
        tnc = GetMeAnEntry();

        tnc->dirp = adp;
        tnc->vp = avc;
        tnc->key = key;
        memcpy((char *)tnc->name, aname, ts - aname + 1);       /* include the NULL */

        InsertEntry(tnc);
    } else {
        /* duplicate */
        tnc->vp = avc;
    }
    ReleaseWriteLock(&afs_xdnlc);

    return 0;
}
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    if (!afs_usednlc)
        return 0;

    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE)
        return 0;
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);

    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
         tnc = tnc->next, safety++) {
        if ( /* (tnc->key == key) && */ (tnc->dirp == adp)
            && (!strcmp((char *)tnc->name, aname))) {
            tvc = tnc->vp;
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            break;
        } else if (safety > NCSIZE) {
            afs_warn("DNLC cycle");
            dnlcstats.cycles++;
            ReleaseReadLock(&afs_xdnlc);
            ReleaseReadLock(&afs_xvcache);
            osi_dnlc_purge();
            return (0);
        }
    }

    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
        ReleaseReadLock(&afs_xvcache);
        dnlcstats.misses++;
    } else {
        if ((tvc->f.states & CVInit)
#ifdef AFS_DARWIN80_ENV
            || (tvc->f.states & CDeadVnode)
#endif
            ) {
            ReleaseReadLock(&afs_xvcache);
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            return 0;
        }
#if defined(AFS_DARWIN80_ENV)
        tvp = AFSTOV(tvc);
        if (vnode_get(tvp)) {
            ReleaseReadLock(&afs_xvcache);
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            return 0;
        }
        if (vnode_ref(tvp)) {
            ReleaseReadLock(&afs_xvcache);
            AFS_GUNLOCK();
            vnode_put(tvp);
            AFS_GLOCK();
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            return 0;
        }
#else
        osi_vnhold(tvc, 0);
#endif
        ReleaseReadLock(&afs_xvcache);
    }

    return tvc;
}
struct vcache *
osi_dnlc_lookup(struct vcache *adp, char *aname, int locktype)
{
    struct vcache *tvc;
    int LRUme;
    unsigned int key, skey;
    char *ts = aname;
    struct nc *tnc, *tnc1 = 0;
    int safety;
#ifdef AFS_DARWIN80_ENV
    vnode_t tvp;
#endif

    ma_critical_enter();

    if (!afs_usednlc) {
        ma_critical_exit();
        return 0;
    }

    dnlcHash(ts, key);          /* leaves ts pointing at the NULL */
    if (ts - aname >= AFSNCNAMESIZE) {
        ma_critical_exit();
        return 0;
    }
    skey = key & (NHSIZE - 1);

    TRACE(osi_dnlc_lookupT, skey);
    dnlcstats.lookups++;

    ObtainReadLock(&afs_xvcache);
    ObtainReadLock(&afs_xdnlc);

    for (tvc = NULL, tnc = nameHash[skey], safety = 0; tnc;
         tnc = tnc->next, safety++) {
        if ( /* (tnc->key == key) && */ (tnc->dirp == adp)
            && (!strcmp((char *)tnc->name, aname))) {
            tvc = tnc->vp;
            tnc1 = tnc;
            break;
        } else if (tnc->next == nameHash[skey]) {       /* end of list */
            break;
        } else if (safety > NCSIZE) {
            afs_warn("DNLC cycle");
            dnlcstats.cycles++;
            ReleaseReadLock(&afs_xdnlc);
            ReleaseReadLock(&afs_xvcache);
            osi_dnlc_purge();
            ma_critical_exit();
            return (0);
        }
    }

    LRUme = 0;                  /* (tnc != nameHash[skey]); */
    ReleaseReadLock(&afs_xdnlc);

    if (!tvc) {
        ReleaseReadLock(&afs_xvcache);
        dnlcstats.misses++;
    } else {
        if ((tvc->f.states & CVInit)
#ifdef AFS_DARWIN80_ENV
            || (tvc->f.states & CDeadVnode)
#endif
            ) {
            ReleaseReadLock(&afs_xvcache);
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            ma_critical_exit();
            return 0;
        }
#if defined(AFS_DARWIN80_ENV)
        tvp = AFSTOV(tvc);
        if (vnode_get(tvp)) {
            ReleaseReadLock(&afs_xvcache);
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            ma_critical_exit();
            return 0;
        }
        if (vnode_ref(tvp)) {
            ReleaseReadLock(&afs_xvcache);
            AFS_GUNLOCK();
            vnode_put(tvp);
            AFS_GLOCK();
            dnlcstats.misses++;
            osi_dnlc_remove(adp, aname, tvc);
            ma_critical_exit();
            return 0;
        }
#elif defined(AFS_FBSD_ENV)
        /* can't sleep in a critical section */
        ma_critical_exit();
        osi_vnhold(tvc, 0);
        ma_critical_enter();
#else
        osi_vnhold(tvc, 0);
#endif
        ReleaseReadLock(&afs_xvcache);

#ifdef notdef
        /*
         * XX If LRUme ever is non-zero change the if statement around because
         * aix's cc with optimizer on won't necessarily check things in order XX
         */
        if (LRUme && (0 == NBObtainWriteLock(&afs_xdnlc))) {
            /* don't block to do this */
            /* tnc might have been moved during race condition, */
            /* but it's always in a legit hash chain when a lock is granted,
             * or else it's on the freelist so prev == NULL,
             * so at worst this is redundant */
            /* Now that we've got it held, and a lock on the dnlc, we
             * should check to be sure that there was no race, and
             * bail out if there was. */
            if (tnc->prev) {
                /* special case for only two elements on list - relative
                 * ordering doesn't change */
                if (tnc->prev != tnc->next) {
                    /* remove from old location */
                    tnc->prev->next = tnc->next;
                    tnc->next->prev = tnc->prev;
                    /* insert into new location */
                    tnc->next = nameHash[skey];
                    tnc->prev = tnc->next->prev;

                    tnc->next->prev = tnc;
                    tnc->prev->next = tnc;
                }
                nameHash[skey] = tnc;
            }
            ReleaseWriteLock(&afs_xdnlc);
        }
#endif
    }

    ma_critical_exit();
    return tvc;
}
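/*
 * Illustrative caller sketch (not part of the original file, and guarded
 * so it never compiles into a real build): how the routines above are
 * typically combined on a lookup path.  The helper afs_find_in_dir() is a
 * hypothetical placeholder for the expensive directory scan; the real
 * consumer of this interface is afs_lookup().
 */
#ifdef DNLC_USAGE_SKETCH
static struct vcache *
lookup_with_dnlc(struct vcache *adp, char *aname, struct vrequest *areq)
{
    struct vcache *tvc;
    afs_hyper_t dv;

    /* 1. Try the name cache first; a hit returns a held vcache. */
    tvc = osi_dnlc_lookup(adp, aname, WRITE_LOCK);
    if (tvc)
        return tvc;

    /* 2. Miss: capture the directory's data version, then do the
     *    expensive directory scan (hypothetical helper). */
    hset(dv, adp->f.m.DataVersion);
    tvc = afs_find_in_dir(adp, aname, areq);    /* hypothetical */

    /* 3. On success, cache the mapping; osi_dnlc_enter() itself refuses
     *    the entry if the directory moved past the captured version. */
    if (tvc)
        osi_dnlc_enter(adp, aname, tvc, &dv);

    return tvc;
}
#endif /* DNLC_USAGE_SKETCH */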