/*
 * afs_TransitionToCaching:
 * Switch a file from bypass-cache mode back to normal caching.  This is
 * almost exactly like the static PFlush() routine in afs_pioctl.c.  Any
 * existing VM pages for the file are thrown out, and the transition
 * counter is bumped so we can track caching/bypass flip-flops.
 */
void
afs_TransitionToCaching(struct vcache *avc, afs_ucred_t *acred, int aflags)
{
    int clearDesire;
    int markManual;

    if (avc == NULL)
	return;

    /* Decode the requested side effects from the flag word. */
    clearDesire = (aflags & TRANSChangeDesiredBit) != 0;
    markManual = (aflags & TRANSSetManualBit) != 0;

#ifdef AFS_BOZONLOCK_ENV
    /* afs_TryToSmush will do a pvn_vptrunc, so take the Bozon lock */
    afs_BozonLock(&avc->pvnLock, avc);
#else
    AFS_GLOCK();
#endif
    ObtainWriteLock(&avc->lock, 926);
    /*
     * We checked FCSBypass earlier without holding the lock, so another
     * thread may already have completed the transition.  Nothing to do
     * in that case — just drop the locks and return.
     */
    if (!(avc->cachingStates & FCSBypass))
	goto done;

    /* Really transitioning: invalidate the stat info and cached data. */
    ObtainWriteLock(&afs_xcbhash, 957);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* next reference re-stats the entry */
    ReleaseWriteLock(&afs_xcbhash);
    /* discard the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }

    avc->cachingStates &= ~(FCSBypass);	/* back to normal caching */
    if (clearDesire)
	avc->cachingStates &= ~(FCSDesireBypass);
    if (markManual)
	avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
/*
 * afs_TransitionToBypass:
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static.  We are about to change a file from
 * normal caching to bypass it's caching.  Therefore, we want to
 * free up any cache space in use by the file, and throw out any
 * existing VM pages for the file.  We keep track of the number of
 * times we go back and forth from caching to bypass.
 *
 * aflags: TRANSChangeDesiredBit also sets FCSDesireBypass;
 *         TRANSSetManualBit marks the state as manually chosen.
 */
void
afs_TransitionToBypass(struct vcache *avc, afs_ucred_t *acred, int aflags)
{
    afs_int32 code;
    struct vrequest treq;
    int setDesire = 0;
    int setManual = 0;

    if (!avc)
	return;

    if (aflags & TRANSChangeDesiredBit)
	setDesire = 1;
    if (aflags & TRANSSetManualBit)
	setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif

    ObtainWriteLock(&avc->lock, 925);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (avc->cachingStates & FCSBypass)
	goto done;

    /* If we never cached this, just change state */
    /*
     * NOTE(review): since we goto done above whenever FCSBypass is set,
     * the !(cachingStates & FCSBypass) term here is always true, so any
     * TRANSChangeDesiredBit caller skips the flush below entirely.  The
     * comment suggests a different condition was intended — confirm
     * against upstream before changing.
     */
    if (setDesire && (!(avc->cachingStates & FCSBypass))) {
	avc->cachingStates |= FCSBypass;
	goto done;
    }

    /* cg2v, try to store any chunks not written 20071204 */
    /* Flush dirty data back to the fileserver before discarding it. */
    if (avc->execsOrWriters > 0) {
	code = afs_InitReq(&treq, acred);
	if (!code)
	    code = afs_StoreAllSegments(avc, &treq, AFS_SYNC | AFS_LASTSTORE);
    }

#if 0
    /* also cg2v, don't dequeue the callback */
    ObtainWriteLock(&afs_xcbhash, 956);
    afs_DequeueCallback(avc);
    ReleaseWriteLock(&afs_xcbhash);
#endif

    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat */
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
	afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
	avc->linkData = NULL;
    }

    avc->cachingStates |= FCSBypass;	/* Set the bypass flag */
    if (setDesire)
	avc->cachingStates |= FCSDesireBypass;
    if (setManual)
	avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
afs_open(struct vcache **avcp, afs_int32 aflags, afs_ucred_t *acred) #endif { afs_int32 code; struct vrequest treq; struct vcache *tvc; int writing; struct afs_fakestat_state fakestate; AFS_STATCNT(afs_open); if ((code = afs_InitReq(&treq, acred))) return code; #ifdef AFS_SGI64_ENV /* avcpp can be, but is not necesarily, bhp's vnode. */ tvc = VTOAFS(BHV_TO_VNODE(bhv)); #else tvc = *avcp; #endif afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, aflags); afs_InitFakeStat(&fakestate); AFS_DISCON_LOCK(); code = afs_EvalFakeStat(&tvc, &fakestate, &treq); if (code) goto done; code = afs_VerifyVCache(tvc, &treq); if (code) goto done; ObtainReadLock(&tvc->lock); if (AFS_IS_DISCONNECTED && (afs_DCacheMissingChunks(tvc) != 0)) { ReleaseReadLock(&tvc->lock); /* printf("Network is down in afs_open: missing chunks\n"); */ code = ENETDOWN; goto done; } ReleaseReadLock(&tvc->lock); if (aflags & (FWRITE | FTRUNC)) writing = 1; else writing = 0; if (vType(tvc) == VDIR) { /* directory */ if (writing) { code = EISDIR; goto done; } else { if (!afs_AccessOK (tvc, ((tvc->f.states & CForeign) ? PRSFS_READ : PRSFS_LOOKUP), &treq, CHECK_MODE_BITS)) { code = EACCES; /* printf("afs_Open: no access for dir\n"); */ goto done; } } } else { #ifdef AFS_SUN5_ENV if (AFS_NFSXLATORREQ(acred) && (aflags & FREAD)) { if (!afs_AccessOK (tvc, PRSFS_READ, &treq, CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) { code = EACCES; goto done; } } #endif #ifdef AFS_AIX41_ENV if (aflags & FRSHARE) { /* * Hack for AIX 4.1: * Apparently it is possible for a file to get mapped without * either VNOP_MAP or VNOP_RDWR being called, if (1) it is a * sharable library, and (2) it has already been loaded. We must * ensure that the credp is up to date. We detect the situation * by checking for O_RSHARE at open time. */ /* * We keep the caller's credentials since an async daemon will * handle the request at some point. We assume that the same * credentials will be used. 
*/ ObtainWriteLock(&tvc->lock, 140); if (!tvc->credp || (tvc->credp != acred)) { crhold(acred); if (tvc->credp) { struct ucred *crp = tvc->credp; tvc->credp = NULL; crfree(crp); } tvc->credp = acred; } ReleaseWriteLock(&tvc->lock); } #endif /* normal file or symlink */ osi_FlushText(tvc); /* only needed to flush text if text locked last time */ #ifdef AFS_BOZONLOCK_ENV afs_BozonLock(&tvc->pvnLock, tvc); #endif osi_FlushPages(tvc, acred); #ifdef AFS_BOZONLOCK_ENV afs_BozonUnlock(&tvc->pvnLock, tvc); #endif } /* set date on file if open in O_TRUNC mode */ if (aflags & FTRUNC) { /* this fixes touch */ ObtainWriteLock(&tvc->lock, 123); tvc->f.m.Date = osi_Time(); tvc->f.states |= CDirty; ReleaseWriteLock(&tvc->lock); } ObtainReadLock(&tvc->lock); if (writing) tvc->execsOrWriters++; tvc->opens++; #if defined(AFS_SGI_ENV) || defined (AFS_LINUX26_ENV) if (writing && tvc->cred == NULL) { crhold(acred); tvc->cred = acred; } #endif ReleaseReadLock(&tvc->lock); if ((afs_preCache != 0) && (writing == 0) && (vType(tvc) != VDIR) && (!afs_BBusy())) { struct dcache *tdc; afs_size_t offset, len; tdc = afs_GetDCache(tvc, 0, &treq, &offset, &len, 1); ObtainSharedLock(&tdc->mflock, 865); if (!(tdc->mflags & DFFetchReq)) { struct brequest *bp; /* start the daemon (may already be running, however) */ UpgradeSToWLock(&tdc->mflock, 666); tdc->mflags |= DFFetchReq; /* guaranteed to be cleared by BKG or GetDCache */ /* last parm (1) tells bkg daemon to do an afs_PutDCache when it is done, since we don't want to wait for it to finish before doing so ourselves. 
*/ bp = afs_BQueue(BOP_FETCH, tvc, B_DONTWAIT, 0, acred, (afs_size_t) 0, (afs_size_t) 1, tdc, (void *)0, (void *)0); if (!bp) { tdc->mflags &= ~DFFetchReq; } ReleaseWriteLock(&tdc->mflock); } else { ReleaseSharedLock(&tdc->mflock); } } done: afs_PutFakeStat(&fakestate); AFS_DISCON_UNLOCK(); code = afs_CheckCode(code, &treq, 4); /* avoid AIX -O bug */ afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc, ICL_TYPE_INT32, 999999); return code; }
/*
 * afsrename:
 * Rename aname1 in directory aodp to aname2 in directory andp.
 * Performs the RPC (or records the operation for replay when
 * disconnected), then mirrors the change into the locally cached
 * directory blobs when the version numbers advanced as expected.
 * Both directories must be in the same cell and volume (else EXDEV).
 * Lock ordering: the directory vcache with the smaller vnode number
 * is locked first to avoid deadlock; tdc1/tdc2 are the cached dir data.
 */
int
afsrename(struct vcache *aodp, char *aname1, struct vcache *andp,
	  char *aname2, struct AFS_UCRED *acred, struct vrequest *areq)
{
    register struct afs_conn *tc;
    register afs_int32 code = 0;
    afs_int32 returnCode;
    int oneDir, doLocally;
    afs_size_t offset, len;
    struct VenusFid unlinkFid, fileFid;
    struct vcache *tvc;
    struct dcache *tdc1, *tdc2;
    struct AFSFetchStatus OutOldDirStatus, OutNewDirStatus;
    struct AFSVolSync tsync;
    XSTATS_DECLS;
    AFS_STATCNT(afs_rename);
    afs_Trace4(afs_iclSetp, CM_TRACE_RENAME, ICL_TYPE_POINTER, aodp,
	       ICL_TYPE_STRING, aname1, ICL_TYPE_POINTER, andp,
	       ICL_TYPE_STRING, aname2);

    if (strlen(aname1) > AFSNAMEMAX || strlen(aname2) > AFSNAMEMAX) {
	code = ENAMETOOLONG;
	goto done;
    }

    /* verify the latest versions of the stat cache entries */
  tagain:
    code = afs_VerifyVCache(aodp, areq);
    if (code)
	goto done;
    code = afs_VerifyVCache(andp, areq);
    if (code)
	goto done;

    /* lock in appropriate order, after some checks */
    if (aodp->f.fid.Cell != andp->f.fid.Cell
	|| aodp->f.fid.Fid.Volume != andp->f.fid.Fid.Volume) {
	code = EXDEV;
	goto done;
    }
    oneDir = 0;
    code = 0;
    if (andp->f.fid.Fid.Vnode == aodp->f.fid.Fid.Vnode) {
	/* source and target directory are the same vnode */
	if (!strcmp(aname1, aname2)) {
	    /* Same directory and same name; this is a noop and just return
	     * success to save cycles and follow posix standards */
	    code = 0;
	    goto done;
	}

	if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
	    code = ENETDOWN;
	    goto done;
	}

	ObtainWriteLock(&andp->lock, 147);
	tdc1 = afs_GetDCache(aodp, (afs_size_t) 0, areq, &offset, &len, 0);
	if (!tdc1) {
	    code = ENOENT;
	} else {
	    ObtainWriteLock(&tdc1->lock, 643);
	}
	tdc2 = tdc1;
	oneDir = 1;		/* only one dude locked */
    } else if ((andp->f.states & CRO) || (aodp->f.states & CRO)) {
	code = EROFS;
	goto done;
    } else if (andp->f.fid.Fid.Vnode < aodp->f.fid.Fid.Vnode) {
	ObtainWriteLock(&andp->lock, 148);	/* lock smaller one first */
	ObtainWriteLock(&aodp->lock, 149);
	tdc2 = afs_FindDCache(andp, (afs_size_t) 0);
	if (tdc2)
	    ObtainWriteLock(&tdc2->lock, 644);
	tdc1 = afs_GetDCache(aodp, (afs_size_t) 0, areq, &offset, &len, 0);
	if (tdc1)
	    ObtainWriteLock(&tdc1->lock, 645);
	else
	    code = ENOENT;
    } else {
	ObtainWriteLock(&aodp->lock, 150);	/* lock smaller one first */
	ObtainWriteLock(&andp->lock, 557);
	tdc1 = afs_GetDCache(aodp, (afs_size_t) 0, areq, &offset, &len, 0);
	if (tdc1)
	    ObtainWriteLock(&tdc1->lock, 646);
	else
	    code = ENOENT;
	tdc2 = afs_FindDCache(andp, (afs_size_t) 0);
	if (tdc2)
	    ObtainWriteLock(&tdc2->lock, 647);
    }

    /* invalidate name-lookup cache entries for both names */
    osi_dnlc_remove(aodp, aname1, 0);
    osi_dnlc_remove(andp, aname2, 0);

    /*
     * Make sure that the data in the cache is current.  We may have
     * received a callback while we were waiting for the write lock.
     */
    if (tdc1) {
	if (!(aodp->f.states & CStatd)
	    || !hsame(aodp->f.m.DataVersion, tdc1->f.versionNo)) {
	    /* stale: drop everything and restart from verification */
	    ReleaseWriteLock(&aodp->lock);
	    if (!oneDir) {
		if (tdc2) {
		    ReleaseWriteLock(&tdc2->lock);
		    afs_PutDCache(tdc2);
		}
		ReleaseWriteLock(&andp->lock);
	    }
	    ReleaseWriteLock(&tdc1->lock);
	    afs_PutDCache(tdc1);
	    goto tagain;
	}
    }

    if (code == 0)
	code = afs_dir_Lookup(tdc1, aname1, &fileFid.Fid);
    if (code) {
	/* source name not found (or dir data missing): unwind all locks */
	if (tdc1) {
	    ReleaseWriteLock(&tdc1->lock);
	    afs_PutDCache(tdc1);
	}
	ReleaseWriteLock(&aodp->lock);
	if (!oneDir) {
	    if (tdc2) {
		ReleaseWriteLock(&tdc2->lock);
		afs_PutDCache(tdc2);
	    }
	    ReleaseWriteLock(&andp->lock);
	}
	goto done;
    }

    if (!AFS_IS_DISCON_RW) {
	/* Connected: do the rename on the fileserver, with retry/failover. */
	do {
	    tc = afs_Conn(&aodp->f.fid, areq, SHARED_LOCK);
	    if (tc) {
		XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RENAME);
		RX_AFS_GUNLOCK();
		code =
		    RXAFS_Rename(tc->id,
				 (struct AFSFid *)&aodp->f.fid.Fid, aname1,
				 (struct AFSFid *)&andp->f.fid.Fid, aname2,
				 &OutOldDirStatus, &OutNewDirStatus, &tsync);
		RX_AFS_GLOCK();
		XSTATS_END_TIME;
	    } else
		code = -1;
	} while (afs_Analyze
		 (tc, code, &andp->f.fid, areq, AFS_STATS_FS_RPCIDX_RENAME,
		  SHARED_LOCK, NULL));
    } else {
#if defined(AFS_DISCON_ENV)
	/* Disconnected: record the rename for replay at reconnect time. */

	/* Seek moved file vcache. */
	fileFid.Cell = aodp->f.fid.Cell;
	fileFid.Fid.Volume = aodp->f.fid.Fid.Volume;
	ObtainSharedLock(&afs_xvcache, 754);
	tvc = afs_FindVCache(&fileFid, 0, 1);
	ReleaseSharedLock(&afs_xvcache);

	if (tvc) {
	    /* XXX - We're locking this vcache whilst holding dcaches. Ooops */
	    ObtainWriteLock(&tvc->lock, 750);
	    if (!(tvc->f.ddirty_flags & (VDisconRename | VDisconCreate))) {
		/* If the vnode was created locally, then we don't care
		 * about recording the rename - we'll do it automatically
		 * on replay.  If we've already renamed, we've already stored
		 * the required information about where we came from.
		 */

		if (!aodp->f.shadow.vnode) {
		    /* Make shadow copy of parent dir only. */
		    afs_MakeShadowDir(aodp, tdc1);
		}

		/* Save old parent dir fid so it will be searchable
		 * in the shadow dir. */
		tvc->f.oldParent.vnode = aodp->f.fid.Fid.Vnode;
		tvc->f.oldParent.unique = aodp->f.fid.Fid.Unique;

		afs_DisconAddDirty(tvc,
				   VDisconRename
				   | (oneDir ? VDisconRenameSameDir : 0), 1);
	    }

	    ReleaseWriteLock(&tvc->lock);
	    afs_PutVCache(tvc);
	} else {
	    code = ENOENT;
	}			/* if (tvc) */
#endif
    }				/* if !(AFS_IS_DISCON_RW) */
    returnCode = code;		/* remember for later */

    /* Now we try to do things locally.  This is really loathsome code. */
    unlinkFid.Fid.Vnode = 0;
    if (code == 0) {
	/* In any event, we don't really care if the data (tdc2) is not
	 * in the cache; if it isn't, we won't do the update locally.  */
	/* see if version numbers increased properly */
	doLocally = 1;
	if (!AFS_IS_DISCON_RW) {
	    if (oneDir) {
		/* number increases by 1 for whole rename operation */
		if (!afs_LocalHero(aodp, tdc1, &OutOldDirStatus, 1)) {
		    doLocally = 0;
		}
	    } else {
		/* two separate dirs, each increasing by 1 */
		if (!afs_LocalHero(aodp, tdc1, &OutOldDirStatus, 1))
		    doLocally = 0;
		if (!afs_LocalHero(andp, tdc2, &OutNewDirStatus, 1))
		    doLocally = 0;
		if (!doLocally) {
		    /* version skew: discard both cached dir blobs */
		    if (tdc1) {
			ZapDCE(tdc1);
			DZap(tdc1);
		    }
		    if (tdc2) {
			ZapDCE(tdc2);
			DZap(tdc2);
		    }
		}
	    }
	}			/* if (!AFS_IS_DISCON_RW) */

	/* now really do the work */
	if (doLocally) {
	    /* first lookup the fid of the dude we're moving */
	    code = afs_dir_Lookup(tdc1, aname1, &fileFid.Fid);
	    if (code == 0) {
		/* delete the source */
		code = afs_dir_Delete(tdc1, aname1);
	    }
	    /* first see if target is there */
	    if (code == 0
		&& afs_dir_Lookup(tdc2, aname2, &unlinkFid.Fid) == 0) {
		/* target already exists, and will be unlinked by server */
		code = afs_dir_Delete(tdc2, aname2);
	    }
	    if (code == 0) {
		ObtainWriteLock(&afs_xdcache, 292);
		code = afs_dir_Create(tdc2, aname2, &fileFid.Fid);
		ReleaseWriteLock(&afs_xdcache);
	    }
	    if (code != 0) {
		/* local edit failed: invalidate the cached dir data */
		ZapDCE(tdc1);
		DZap(tdc1);
		if (!oneDir) {
		    ZapDCE(tdc2);
		    DZap(tdc2);
		}
	    }
	}

	/* update dir link counts */
	if (AFS_IS_DISCON_RW) {
	    if (!oneDir) {
		aodp->f.m.LinkCount--;
		andp->f.m.LinkCount++;
	    }
	    /* If we're in the same directory, link count doesn't change */
	} else {
	    aodp->f.m.LinkCount = OutOldDirStatus.LinkCount;
	    if (!oneDir)
		andp->f.m.LinkCount = OutNewDirStatus.LinkCount;
	}
    } else {			/* operation failed (code != 0) */
	if (code < 0) {
	    /* if failed, server might have done something anyway, and
	     * assume that we know about it */
	    ObtainWriteLock(&afs_xcbhash, 498);
	    afs_DequeueCallback(aodp);
	    afs_DequeueCallback(andp);
	    andp->f.states &= ~CStatd;
	    aodp->f.states &= ~CStatd;
	    ReleaseWriteLock(&afs_xcbhash);
	    osi_dnlc_purgedp(andp);
	    osi_dnlc_purgedp(aodp);
	}
    }

    /* release locks */
    if (tdc1) {
	ReleaseWriteLock(&tdc1->lock);
	afs_PutDCache(tdc1);
    }

    if ((!oneDir) && tdc2) {
	ReleaseWriteLock(&tdc2->lock);
	afs_PutDCache(tdc2);
    }

    ReleaseWriteLock(&aodp->lock);
    if (!oneDir) {
	ReleaseWriteLock(&andp->lock);
    }

    if (returnCode) {
	code = returnCode;
	goto done;
    }

    /* now, some more details.  if unlinkFid.Fid.Vnode then we should decrement
     * the link count on this file.  Note that if fileFid is a dir, then we don't
     * have to invalidate its ".." entry, since its DataVersion # should have
     * changed.  However, interface is not good enough to tell us the
     * *file*'s new DataVersion, so we're stuck.  Our hack: delete mark
     * the data as having an "unknown" version (effectively discarding the ".."
     * entry */
    if (unlinkFid.Fid.Vnode) {
	/* a target was overwritten; fix up its vcache link count */
	unlinkFid.Fid.Volume = aodp->f.fid.Fid.Volume;
	unlinkFid.Cell = aodp->f.fid.Cell;
	tvc = NULL;
	if (!unlinkFid.Fid.Unique) {
	    tvc = afs_LookupVCache(&unlinkFid, areq, NULL, aodp, aname1);
	}
	if (!tvc)		/* lookup failed or wasn't called */
	    tvc = afs_GetVCache(&unlinkFid, areq, NULL, NULL);

	if (tvc) {
#ifdef AFS_BOZONLOCK_ENV
	    afs_BozonLock(&tvc->pvnLock, tvc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#endif
	    ObtainWriteLock(&tvc->lock, 151);
	    tvc->f.m.LinkCount--;
	    tvc->f.states &= ~CUnique;	/* For the dfs xlator */
	    if (tvc->f.m.LinkCount == 0 && !osi_Active(tvc)) {
		/* if this was last guy (probably) discard from cache.
		 * We have to be careful to not get rid of the stat
		 * information, since otherwise operations will start
		 * failing even if the file was still open (or
		 * otherwise active), and the server no longer has the
		 * info.  If the file still has valid links, we'll get
		 * a break-callback msg from the server, so it doesn't
		 * matter that we don't discard the status info */
		if (!AFS_NFSXLATORREQ(acred))
		    afs_TryToSmush(tvc, acred, 0);
	    }
	    ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
	    afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
	    afs_PutVCache(tvc);
	}
    }

    /* now handle ".." invalidation */
    if (!oneDir) {
	fileFid.Fid.Volume = aodp->f.fid.Fid.Volume;
	fileFid.Cell = aodp->f.fid.Cell;
	if (!fileFid.Fid.Unique)
	    tvc = afs_LookupVCache(&fileFid, areq, NULL, andp, aname2);
	else
	    tvc = afs_GetVCache(&fileFid, areq, NULL, (struct vcache *)0);
	if (tvc && (vType(tvc) == VDIR)) {
	    ObtainWriteLock(&tvc->lock, 152);
	    tdc1 = afs_FindDCache(tvc, (afs_size_t) 0);
	    if (tdc1) {
		if (AFS_IS_DISCON_RW) {
#if defined(AFS_DISCON_ENV)
		    /* If disconnected, we need to fix (not discard) the "..".*/
		    afs_dir_ChangeFid(tdc1, "..",
				      &aodp->f.fid.Fid.Vnode,
				      &andp->f.fid.Fid.Vnode);
#endif
		} else {
		    ObtainWriteLock(&tdc1->lock, 648);
		    ZapDCE(tdc1);	/* mark as unknown */
		    DZap(tdc1);
		    ReleaseWriteLock(&tdc1->lock);
		    afs_PutDCache(tdc1);	/* put it back */
		}
	    }
	    osi_dnlc_remove(tvc, "..", 0);
	    ReleaseWriteLock(&tvc->lock);
	    afs_PutVCache(tvc);
	} else if (AFS_IS_DISCON_RW && tvc && (vType(tvc) == VREG)) {
	    /* XXX - Should tvc not get locked here? */
	    tvc->f.parent.vnode = andp->f.fid.Fid.Vnode;
	    tvc->f.parent.unique = andp->f.fid.Fid.Unique;
	} else if (tvc) {
	    /* True we shouldn't come here since tvc SHOULD be a dir, but we
	     * 'syntactically' need to unless we change the 'if' above... */
	    afs_PutVCache(tvc);
	}
    }
    code = returnCode;
  done:
    return code;
}
/*
 * afsremove:
 * Remove aname from directory adp.  Issues the RemoveFile RPC when
 * connected, then mirrors the deletion into the cached directory blob.
 * Lock contract (inferred from the unlock calls below — confirm with
 * callers): on entry the caller holds a write lock on adp->lock and,
 * when tdc is non-NULL, a shared lock on tdc->lock plus a reference;
 * both are released here on every path.  tvc (the victim's vcache, may
 * be NULL) is released here as well.
 */
int
afsremove(register struct vcache *adp, register struct dcache *tdc,
	  register struct vcache *tvc, char *aname, afs_ucred_t *acred,
	  struct vrequest *treqp)
{
    register afs_int32 code = 0;
    register struct afs_conn *tc;
    struct AFSFetchStatus OutDirStatus;
    struct AFSVolSync tsync;
    XSTATS_DECLS;
    if (!AFS_IS_DISCONNECTED) {
	/* Connected: perform the remove on the fileserver, with retry. */
	do {
	    tc = afs_Conn(&adp->f.fid, treqp, SHARED_LOCK);
	    if (tc) {
		XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEFILE);
		RX_AFS_GUNLOCK();
		code =
		    RXAFS_RemoveFile(tc->id, (struct AFSFid *)&adp->f.fid.Fid,
				     aname, &OutDirStatus, &tsync);
		RX_AFS_GLOCK();
		XSTATS_END_TIME;
	    } else
		code = -1;
	} while (afs_Analyze
		 (tc, code, &adp->f.fid, treqp,
		  AFS_STATS_FS_RPCIDX_REMOVEFILE, SHARED_LOCK, NULL));
    }

    osi_dnlc_remove(adp, aname, tvc);

    if (code) {
	/* RPC failed: drop our references/locks and bail out */
	if (tdc) {
	    ReleaseSharedLock(&tdc->lock);
	    afs_PutDCache(tdc);
	}

	if (tvc)
	    afs_PutVCache(tvc);

	if (code < 0) {
	    /* hard error: the server may have acted anyway, so
	     * invalidate our view of the directory */
	    ObtainWriteLock(&afs_xcbhash, 497);
	    afs_DequeueCallback(adp);
	    adp->f.states &= ~CStatd;
	    ReleaseWriteLock(&afs_xcbhash);
	    osi_dnlc_purgedp(adp);
	}
	ReleaseWriteLock(&adp->lock);
	code = afs_CheckCode(code, treqp, 21);
	return code;
    }
    if (tdc)
	UpgradeSToWLock(&tdc->lock, 637);
    /*
     * NOTE(review): if tdc is NULL while AFS_IS_DISCON_RW is true,
     * afs_dir_Delete(NULL, ...) is reached — confirm callers guarantee
     * tdc != NULL in disconnected mode, or that afs_dir_Delete tolerates
     * a NULL dcache.
     */
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
	/* we can do it locally */
	code = afs_dir_Delete(tdc, aname);
	if (code) {
	    ZapDCE(tdc);	/* surprise error -- invalid value */
	    DZap(tdc);
	}
    }
    if (tdc) {
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);	/* drop ref count */
    }
    ReleaseWriteLock(&adp->lock);

    /* now, get vnode for unlinked dude, and see if we should force it
     * from cache.  tvc (not adp) is the deleted file's vnode.  Note that we
     * call FindVCache instead of GetVCache since if the file's really
     * gone, we won't be able to fetch the status info anyway.  */
    if (tvc) {
	afs_MarinerLog("store$Removing", tvc);
#ifdef AFS_BOZONLOCK_ENV
	afs_BozonLock(&tvc->pvnLock, tvc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#endif
	ObtainWriteLock(&tvc->lock, 141);
	/* note that callback will be broken on the deleted file if there are
	 * still >0 links left to it, so we'll get the stat right */
	tvc->f.m.LinkCount--;
	tvc->f.states &= ~CUnique;	/* For the dfs xlator */
	if (tvc->f.m.LinkCount == 0 && !osi_Active(tvc)) {
	    /* last link and not in use: discard the cached data */
	    if (!AFS_NFSXLATORREQ(acred))
		afs_TryToSmush(tvc, acred, 0);
	}
	ReleaseWriteLock(&tvc->lock);
#ifdef AFS_BOZONLOCK_ENV
	afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
	afs_PutVCache(tvc);
    }
    return (0);
}
int afs_getattr(OSI_VC_DECL(avc), struct vattr *attrs, afs_ucred_t *acred) #endif { afs_int32 code; struct vrequest treq; struct unixuser *au; int inited = 0; OSI_VC_CONVERT(avc); AFS_STATCNT(afs_getattr); afs_Trace2(afs_iclSetp, CM_TRACE_GETATTR, ICL_TYPE_POINTER, avc, ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length)); if (afs_fakestat_enable && avc->mvstat == 1) { struct afs_fakestat_state fakestat; code = afs_InitReq(&treq, acred); if (code) return code; afs_InitFakeStat(&fakestat); code = afs_TryEvalFakeStat(&avc, &fakestat, &treq); if (code) { afs_PutFakeStat(&fakestat); return code; } code = afs_CopyOutAttrs(avc, attrs); afs_PutFakeStat(&fakestat); return code; } #if defined(AFS_SUN5_ENV) if (flags & ATTR_HINT) { code = afs_CopyOutAttrs(avc, attrs); return code; } #endif #if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV) if (avc->f.states & CUBCinit) { code = afs_CopyOutAttrs(avc, attrs); return code; } #endif AFS_DISCON_LOCK(); #ifdef AFS_BOZONLOCK_ENV afs_BozonLock(&avc->pvnLock, avc); #endif if (afs_shuttingdown) return EIO; if (!(avc->f.states & CStatd)) { if (!(code = afs_InitReq(&treq, acred))) { code = afs_VerifyVCache2(avc, &treq); inited = 1; } } else code = 0; #if defined(AFS_SUN5_ENV) || defined(AFS_BOZONLOCK_ENV) if (code == 0) osi_FlushPages(avc, acred); #endif #ifdef AFS_BOZONLOCK_ENV afs_BozonUnlock(&avc->pvnLock, avc); #endif if (code == 0) { osi_FlushText(avc); /* only needed to flush text if text locked last time */ code = afs_CopyOutAttrs(avc, attrs); if (afs_nfsexporter) { if (!inited) { if ((code = afs_InitReq(&treq, acred))) return code; inited = 1; } if (AFS_NFSXLATORREQ(acred)) { if ((vType(avc) != VDIR) && !afs_AccessOK(avc, PRSFS_READ, &treq, CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) { return EACCES; } } if ((au = afs_FindUser(treq.uid, -1, READ_LOCK))) { struct afs_exporter *exporter = au->exporter; if (exporter && !(afs_nfsexporter->exp_states & EXP_UNIXMODE)) { unsigned int ubits; /* * If the remote user wishes to 
enforce default Unix mode semantics, * like in the nfs exporter case, we OR in the user bits * into the group and other bits. We need to do this * because there is no RFS_ACCESS call and thus nfs * clients implement nfs_access by interpreting the * mode bits in the traditional way, which of course * loses with afs. */ ubits = (attrs->va_mode & 0700) >> 6; attrs->va_mode = attrs->va_mode | ubits | (ubits << 3); /* If it's the root of AFS, replace the inode number with the * inode number of the mounted on directory; otherwise this * confuses getwd()... */ #ifdef AFS_LINUX22_ENV if (avc == afs_globalVp) { struct inode *ip = AFSTOV(avc)->i_sb->s_root->d_inode; attrs->va_nodeid = ip->i_ino; /* VTOI()? */ } #else if ( #if defined(AFS_DARWIN_ENV) vnode_isvroot(AFSTOV(avc)) #elif defined(AFS_NBSD50_ENV) AFSTOV(avc)->v_vflag & VV_ROOT #else AFSTOV(avc)->v_flag & VROOT #endif ) { struct vnode *vp = AFSTOV(avc); #ifdef AFS_DARWIN80_ENV /* XXX vp = vnode_mount(vp)->mnt_vnodecovered; */ vp = 0; #else vp = vp->v_vfsp->vfs_vnodecovered; if (vp) { /* Ignore weird failures */ #ifdef AFS_SGI62_ENV attrs->va_nodeid = VnodeToIno(vp); #else struct inode *ip; ip = (struct inode *)VTOI(vp); if (ip) /* Ignore weird failures */ attrs->va_nodeid = ip->i_number; #endif } #endif } #endif /* AFS_LINUX22_ENV */ } afs_PutUser(au, READ_LOCK); } }