/*!
 * Print list of disconnected files.
 *
 * \note Obtains and releases afs_disconDirtyLock (read) internally.
 */
void
afs_DbgDisconFiles(void)
{
    struct vcache *tvc;
    struct afs_q *q;
    int i = 0;

    afs_warn("List of dirty files: \n");

    ObtainReadLock(&afs_disconDirtyLock);
    for (q = QPrev(&afs_disconDirty); q != &afs_disconDirty; q = QPrev(q)) {
        tvc = QEntry(q, struct vcache, dirtyq);

        afs_warn("Cell=%u Volume=%u VNode=%u Unique=%u\n",
                 tvc->f.fid.Cell,
                 tvc->f.fid.Fid.Volume,
                 tvc->f.fid.Fid.Vnode,
                 tvc->f.fid.Fid.Unique);

        i++;
        if (i >= 30)
            osi_Panic("afs_DbgDisconFiles: loop in dirty list\n");
    }
    ReleaseReadLock(&afs_disconDirtyLock);
}
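/*!
 * Kernel-debugger (idbg) helper: walk the VLRU and print one line per cached
 * vnode, showing the vcache address, vnode type, reference count, page and
 * map counts, and the pseudo inode number ("nodeid") that ls reports.
 */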
int
idbg_afsvfslist()
{
    struct vcache *tvc;
    struct afs_q *tq;
    struct afs_q *uq;
    afs_int32 nodeid;           /* what ls prints as 'inode' */

    AFS_GLOCK();
    for (tq = VLRU.prev; tq != &VLRU; tq = uq) {
        tvc = QTOV(tq);
        uq = QPrev(tq);
        nodeid = tvc->f.fid.Fid.Vnode + (tvc->f.fid.Fid.Volume << 16);
        nodeid &= 0x7fffffff;
        qprintf("avp 0x%x type %s cnt %d pg %d map %d nodeid %d(0x%x)\n",
                tvc, tab_vtypes[((vnode_t *) tvc)->v_type],
                ((vnode_t *) tvc)->v_count,
                (int)VN_GET_PGCNT((vnode_t *) tvc),
                (int)tvc->mapcnt, nodeid, nodeid);
    }
    AFS_GUNLOCK();
    return 0;
}
/* afs_FlushCBs
 * to be used only in dire circumstances, this drops all callbacks on
 * the floor, without giving them back to the server.  It's ok, the server can
 * deal with it, but it is a little bit rude.
 */
void
afs_FlushCBs(void)
{
    int i;
    struct vcache *tvc;

    ObtainWriteLock(&afs_xcbhash, 86);  /* pretty likely I'm going to remove something */

    for (i = 0; i < VCSIZE; i++)        /* reset all the vnodes */
        for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) {
            tvc->callback = 0;
            tvc->dchint = NULL;         /* invalidate hints */
            tvc->f.states &= ~(CStatd);
            if (QPrev(&(tvc->callsort)))
                QRemove(&(tvc->callsort));
            if (!(tvc->f.states & (CVInit | CVFlushed))
                && ((tvc->f.fid.Fid.Vnode & 1) || (vType(tvc) == VDIR)))
                osi_dnlc_purgedp(tvc);
        }

    afs_InitCBQueue(0);
    ReleaseWriteLock(&afs_xcbhash);
}
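/*!
 * Scan the current callback-expiration hash bucket and expire any callbacks
 * that will lapse within the next \a secs seconds: affected vcaches are
 * dequeued and their CStatd/CMValid/CUnique bits cleared so status is
 * refetched on next reference.  Read-only volumes are handled through the
 * volume callback (afs_FindVolume/afs_ResetVolumeInfo) rather than per file.
 * Entries whose callback has meanwhile been renewed are requeued into the
 * bucket matching their new expiration time.
 *
 * \param secs  look-ahead window, in seconds.
 */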
void
afs_CheckCallbacks(unsigned int secs)
{
    struct vcache *tvc;
    struct afs_q *tq;
    struct afs_q *uq;
    afs_uint32 now;
    struct volume *tvp;
    int safety;

    ObtainWriteLock(&afs_xcbhash, 85);  /* pretty likely I'm going to remove something */
    now = osi_Time();
    for (safety = 0, tq = cbHashT[base].head.prev;
         (safety <= CBQ_LIMIT) && (tq != &(cbHashT[base].head));
         tq = uq, safety++) {
        uq = QPrev(tq);
        tvc = CBQTOV(tq);
        if (tvc->cbExpires < now + secs) {      /* race #1 here */
            /* Get the volume, and if its callback expiration time is more than secs
             * seconds into the future, update this vcache entry and requeue it below */
            if ((tvc->f.states & CRO)
                && (tvp = afs_FindVolume(&(tvc->f.fid), READ_LOCK))) {
                if (tvp->expireTime > now + secs) {
                    tvc->cbExpires = tvp->expireTime;   /* XXX race here */
                } else {
                    int i;
                    for (i = 0; i < MAXHOSTS && tvp->serverHost[i]; i++) {
                        if (!(tvp->serverHost[i]->flags & SRVR_ISDOWN)) {
                            /* What about locking xvcache or vrefcount++ or
                             * write locking tvc? */
                            QRemove(tq);
                            tvc->f.states &= ~(CStatd | CMValid | CUnique);
                            if (!(tvc->f.states & (CVInit | CVFlushed))
                                && (tvc->f.fid.Fid.Vnode & 1
                                    || (vType(tvc) == VDIR)))
                                osi_dnlc_purgedp(tvc);
                            tvc->dchint = NULL; /* invalidate em */
                            afs_ResetVolumeInfo(tvp);
                            break;
                        }
                    }
                }
                afs_PutVolume(tvp, READ_LOCK);
            } else {
                /* Do I need to worry about things like execsorwriters?
                 * What about locking xvcache or vrefcount++ or write locking tvc? */
                QRemove(tq);
                tvc->f.states &= ~(CStatd | CMValid | CUnique);
                if (!(tvc->f.states & (CVInit | CVFlushed))
                    && (tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
                    osi_dnlc_purgedp(tvc);
            }
        }

        if ((tvc->cbExpires > basetime) && CBHash(tvc->cbExpires - basetime)) {
            /* it's been renewed on us.  Have to be careful not to put it back
             * into this slot, or we may never get out of here.
             */
            int slot;
            slot = (CBHash(tvc->cbExpires - basetime) + base) % CBHTSIZE;
            if (slot != base) {
                if (QPrev(tq))
                    QRemove(&(tvc->callsort));
                QAdd(&(cbHashT[slot].head), &(tvc->callsort));
                /* XXX remember to update volume expiration time */
                /* -- not needed for correctness, though */
            }
        }
    }

    if (safety > CBQ_LIMIT) {
        afs_stats_cmperf.cbloops++;
        if (afs_paniconwarn)
            osi_Panic("CheckCallbacks");

        afs_warn
            ("AFS Internal Error (minor): please contact AFS Product Support.\n");
        ReleaseWriteLock(&afs_xcbhash);
        afs_FlushCBs();
        return;
    } else
        ReleaseWriteLock(&afs_xcbhash);

    /* XXX future optimization:
     * if this item has been recently accessed, queue up a stat for it.
     * {
     *     struct dcache *adc;
     *
     *     ObtainReadLock(&afs_xdcache);
     *     if ((adc = tvc->quick.dc) && (adc->stamp == tvc->quick.stamp)
     *         && (afs_indexTimes[adc->index] > afs_indexCounter - 20)) {
     *         queue up the stat request
     *     }
     *     ReleaseReadLock(&afs_xdcache);
     * }
     */

    return;
}                               /* afs_CheckCallback */
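/*!
 * Darwin only: after tokens are obtained or discarded for \a auser, walk every
 * cached AFS directory and push a fake attribute change (vnode_setattr() with
 * the chosen uid) so fsevents listeners such as Finder re-evaluate access.
 * Skips dead vnodes, dynroot objects, and mount-point sources; no-ops unless
 * afs_darwin_fsevents is set.
 */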
/* works like PFlushVolumeData */
void
darwin_notify_perms(struct unixuser *auser, int event)
{
    int i;
    struct afs_q *tq, *uq = NULL;
    struct vcache *tvc, *hnext;
    int isglock = ISAFS_GLOCK();
    struct vnode *vp;
    struct vnode_attr va;
    int isctxtowner = 0;

    if (!afs_darwin_fsevents)
        return;

    VATTR_INIT(&va);
    VATTR_SET(&va, va_mode, 0777);
    if (event & UTokensObtained)
        VATTR_SET(&va, va_uid, auser->uid);
    else
        VATTR_SET(&va, va_uid, -2);     /* nobody */

    if (!isglock)
        AFS_GLOCK();
    if (!(vfs_context_owner == current_thread())) {
        get_vfs_context();
        isctxtowner = 1;
    }
loop:
    ObtainReadLock(&afs_xvcache);
    for (i = 0; i < VCSIZE; i++) {
        for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
            uq = QPrev(tq);
            tvc = QTOVH(tq);
            if (tvc->f.states & CDeadVnode) {
                /* we can afford to be best-effort */
                continue;
            }
            /* no per-file acls, so only notify on directories */
            if (!(vp = AFSTOV(tvc)) || !vnode_isdir(AFSTOV(tvc)))
                continue;
            /* dynroot object. no callbacks. anonymous ACL. just no. */
            if (afs_IsDynrootFid(&tvc->f.fid))
                continue;
            /* no fake fsevents on mount point sources. leaks refs */
            if (tvc->mvstat == 1)
                continue;
            /* if it's being reclaimed, just pass */
            if (vnode_get(vp))
                continue;
            if (vnode_ref(vp)) {
                AFS_GUNLOCK();
                vnode_put(vp);
                AFS_GLOCK();
                continue;
            }
            ReleaseReadLock(&afs_xvcache);
            /* Avoid potentially re-entering on this lock */
            if (0 == NBObtainWriteLock(&tvc->lock, 234)) {
                tvc->f.states |= CEvent;
                AFS_GUNLOCK();
                vnode_setattr(vp, &va, afs_osi_ctxtp);
                tvc->f.states &= ~CEvent;
                vnode_put(vp);
                AFS_GLOCK();
                ReleaseWriteLock(&tvc->lock);
            }
            ObtainReadLock(&afs_xvcache);
            uq = QPrev(tq);     /* our tvc ptr is still good until now */
            AFS_FAST_RELE(tvc);
        }
    }
    ReleaseReadLock(&afs_xvcache);
    if (isctxtowner)
        put_vfs_context();
    if (!isglock)
        AFS_GUNLOCK();
}
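/*!
 * Handle a callback break from the fileserver for \a a_fid.  A zero volume ID
 * is a no-op; a zero vnode number breaks the callbacks for every cached file
 * in that volume and resets the volume's callback state; otherwise only the
 * matching file's callback is dropped.
 *
 * \note Per the callback protocol, no server locks may be held here (see the
 *       XXXX comments in the body).  Always returns 0.
 */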
static int
ClearCallBack(struct rx_connection *a_conn, struct AFSFid *a_fid)
{
    struct vcache *tvc;
    int i;
    struct VenusFid localFid;
    struct volume *tv;
#ifdef AFS_DARWIN80_ENV
    vnode_t vp;
#endif

    AFS_STATCNT(ClearCallBack);

    AFS_ASSERT_GLOCK();

    /*
     * XXXX Don't hold any server locks here because of callback protocol XXX
     */
    localFid.Cell = 0;
    localFid.Fid.Volume = a_fid->Volume;
    localFid.Fid.Vnode = a_fid->Vnode;
    localFid.Fid.Unique = a_fid->Unique;

    /*
     * Volume ID of zero means don't do anything.
     */
    if (a_fid->Volume != 0) {
        if (a_fid->Vnode == 0) {
            struct afs_q *tq, *uq;
            /*
             * Clear callback for the whole volume.  Zip through the
             * hash chain, nullifying entries whose volume ID matches.
             */
          loop1:
            ObtainReadLock(&afs_xvcache);
            i = VCHashV(&localFid);
            for (tq = afs_vhashTV[i].prev; tq != &afs_vhashTV[i]; tq = uq) {
                uq = QPrev(tq);
                tvc = QTOVH(tq);
                if (tvc->f.fid.Fid.Volume == a_fid->Volume) {
                    tvc->callback = NULL;
                    if (!localFid.Cell)
                        localFid.Cell = tvc->f.fid.Cell;
                    tvc->dchint = NULL; /* invalidate hints */
                    if (tvc->f.states & CVInit) {
                        ReleaseReadLock(&afs_xvcache);
                        afs_osi_Sleep(&tvc->f.states);
                        goto loop1;
                    }
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_HPUX_ENV) || defined(AFS_LINUX20_ENV)
                    AFS_FAST_HOLD(tvc);
#else
#ifdef AFS_DARWIN80_ENV
                    if (tvc->f.states & CDeadVnode) {
                        if (!(tvc->f.states & CBulkFetching)) {
                            ReleaseReadLock(&afs_xvcache);
                            afs_osi_Sleep(&tvc->f.states);
                            goto loop1;
                        }
                    }
                    vp = AFSTOV(tvc);
                    if (vnode_get(vp))
                        continue;
                    if (vnode_ref(vp)) {
                        AFS_GUNLOCK();
                        vnode_put(vp);
                        AFS_GLOCK();
                        continue;
                    }
                    if (tvc->f.states & (CBulkFetching | CDeadVnode)) {
                        AFS_GUNLOCK();
                        vnode_recycle(AFSTOV(tvc));
                        AFS_GLOCK();
                    }
#else
                    AFS_FAST_HOLD(tvc);
#endif
#endif
                    ReleaseReadLock(&afs_xvcache);
                    ObtainWriteLock(&afs_xcbhash, 449);
                    afs_DequeueCallback(tvc);
                    tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
                    afs_allCBs++;
                    if (tvc->f.fid.Fid.Vnode & 1)
                        afs_oddCBs++;
                    else
                        afs_evenCBs++;
                    ReleaseWriteLock(&afs_xcbhash);
                    if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
                        osi_dnlc_purgedp(tvc);
                    afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
                               ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
                               tvc->f.states, ICL_TYPE_INT32, a_fid->Volume);
#ifdef AFS_DARWIN80_ENV
                    vnode_put(AFSTOV(tvc));
#endif
                    ObtainReadLock(&afs_xvcache);
                    uq = QPrev(tq);
                    AFS_FAST_RELE(tvc);
                } else if ((tvc->f.states & CMValid)
                           && (tvc->mvid->Fid.Volume == a_fid->Volume)) {
                    tvc->f.states &= ~CMValid;
                    if (!localFid.Cell)
                        localFid.Cell = tvc->mvid->Cell;
                }
            }
            ReleaseReadLock(&afs_xvcache);

            /*
             * XXXX Don't hold any locks here XXXX
             */
            tv = afs_FindVolume(&localFid, 0);
            if (tv) {
                afs_ResetVolumeInfo(tv);
                afs_PutVolume(tv, 0);
                /* invalidate mtpoint? */
            }
        }                       /*Clear callbacks for whole volume */
        else {
            /*
             * Clear callbacks just for the one file.
             */
            struct vcache *uvc;
            afs_allCBs++;
            if (a_fid->Vnode & 1)
                afs_oddCBs++;   /*Could do this on volume basis, too */
            else
                afs_evenCBs++;  /*A particular fid was specified */
          loop2:
            ObtainReadLock(&afs_xvcache);
            i = VCHash(&localFid);
            for (tvc = afs_vhashT[i]; tvc; tvc = uvc) {
                uvc = tvc->hnext;
                if (tvc->f.fid.Fid.Vnode == a_fid->Vnode
                    && tvc->f.fid.Fid.Volume == a_fid->Volume
                    && tvc->f.fid.Fid.Unique == a_fid->Unique) {
                    tvc->callback = NULL;
                    tvc->dchint = NULL; /* invalidate hints */
                    if (tvc->f.states & CVInit) {
                        ReleaseReadLock(&afs_xvcache);
                        afs_osi_Sleep(&tvc->f.states);
                        goto loop2;
                    }
#if defined(AFS_SGI_ENV) || defined(AFS_SUN5_ENV) || defined(AFS_HPUX_ENV) || defined(AFS_LINUX20_ENV)
                    AFS_FAST_HOLD(tvc);
#else
#ifdef AFS_DARWIN80_ENV
                    if (tvc->f.states & CDeadVnode) {
                        if (!(tvc->f.states & CBulkFetching)) {
                            ReleaseReadLock(&afs_xvcache);
                            afs_osi_Sleep(&tvc->f.states);
                            goto loop2;
                        }
                    }
                    vp = AFSTOV(tvc);
                    if (vnode_get(vp))
                        continue;
                    if (vnode_ref(vp)) {
                        AFS_GUNLOCK();
                        vnode_put(vp);
                        AFS_GLOCK();
                        continue;
                    }
                    if (tvc->f.states & (CBulkFetching | CDeadVnode)) {
                        AFS_GUNLOCK();
                        vnode_recycle(AFSTOV(tvc));
                        AFS_GLOCK();
                    }
#else
                    AFS_FAST_HOLD(tvc);
#endif
#endif
                    ReleaseReadLock(&afs_xvcache);
                    ObtainWriteLock(&afs_xcbhash, 450);
                    afs_DequeueCallback(tvc);
                    tvc->f.states &= ~(CStatd | CUnique | CBulkFetching);
                    ReleaseWriteLock(&afs_xcbhash);
                    if ((tvc->f.fid.Fid.Vnode & 1 || (vType(tvc) == VDIR)))
                        osi_dnlc_purgedp(tvc);
                    afs_Trace3(afs_iclSetp, CM_TRACE_CALLBACK,
                               ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32,
                               tvc->f.states, ICL_TYPE_LONG, 0);
#ifdef CBDEBUG
                    lastCallBack_vnode = a_fid->Vnode;
                    lastCallBack_dv = tvc->mstat.DataVersion.low;
                    osi_GetuTime(&lastCallBack_time);
#endif /* CBDEBUG */
#ifdef AFS_DARWIN80_ENV
                    vnode_put(AFSTOV(tvc));
#endif
                    ObtainReadLock(&afs_xvcache);
                    uvc = tvc->hnext;
                    AFS_FAST_RELE(tvc);
                }
            }                   /*Walk through hash table */
            ReleaseReadLock(&afs_xvcache);
        }                       /*Clear callbacks for one file */
    }                           /*Fid has non-zero volume ID */

    /*
     * Always return a predictable value.
     */
    return (0);
}                               /*ClearCallBack */