/*!
 * Find the first dcache of a file that has the specified fid.
 * Similar to afs_FindDCache, only that it takes a fid instead
 * of a vcache and it can get the first dcache.
 *
 * \param afid
 *
 * \return The found dcache or NULL.
 */
struct dcache *
afs_FindDCacheByFid(struct VenusFid *afid)
{
    afs_int32 i, index;
    struct dcache *tdc = NULL;

    i = DVHash(afid);
    ObtainWriteLock(&afs_xdcache, 758);
    for (index = afs_dvhashTbl[i]; index != NULLIDX;) {
	if (afs_indexUnique[index] == afid->Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (tdc) {
		ReleaseReadLock(&tdc->tlock);
		if (!FidCmp(&tdc->f.fid, afid)) {
		    break;		/* leaving refCount high for caller */
		}
		afs_PutDCache(tdc);
	    }
	}
	index = afs_dvnextTbl[index];
    }
    ReleaseWriteLock(&afs_xdcache);

    if (index == NULLIDX)
	tdc = NULL;

    return tdc;
}
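/*
 * Usage sketch (hypothetical caller, not part of this file): on a hit,
 * the dcache comes back with its refCount left high, so every successful
 * lookup must be paired with afs_PutDCache. Compiled out; illustration
 * only.
 */
#if 0
static void
example_find_by_fid(struct VenusFid *afid)
{
    struct dcache *tdc = afs_FindDCacheByFid(afid);
    if (tdc) {
	/* ... inspect tdc->f.chunk, tdc->f.chunkBytes, etc. ... */
	afs_PutDCache(tdc);	/* drop the reference the lookup left for us */
    }
}
#endif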
/*!
 * Generate a fake fid for a disconnected shadow dir.
 * Similar to afs_GenFakeFid, only that it uses the dhash
 * to search for a uniquifier because a shadow dir lives only
 * in the dcache.
 *
 * \param afid
 *
 * \note Don't forget to fill in afid with Cell and Volume.
 */
void
afs_GenShadowFid(struct VenusFid *afid)
{
    afs_uint32 i, index, max_unique = 1;
    struct vcache *tvc = NULL;

    /* Try generating a fid that isn't used in the vhash. */
    do {
	/* Shadow Fids are always directories */
	afid->Fid.Vnode = afs_DisconVnode + 1;

	i = DVHash(afid);
	ObtainWriteLock(&afs_xdcache, 737);
	for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
	    i = afs_dvnextTbl[index];
	    if (afs_indexUnique[index] > max_unique)
		max_unique = afs_indexUnique[index];
	}
	ReleaseWriteLock(&afs_xdcache);

	afid->Fid.Unique = max_unique + 1;
	afs_DisconVnode += 2;
	if (!afs_DisconVnode)
	    afs_DisconVnode = 2;

	/* Is this a used vnode? */
	ObtainSharedLock(&afs_xvcache, 762);
	tvc = afs_FindVCache(afid, 0, 1);
	ReleaseSharedLock(&afs_xvcache);
	if (tvc)
	    afs_PutVCache(tvc);
    } while (tvc);
}
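/*
 * Note on the numbering above: AFS uses odd vnode numbers for directories
 * and even ones for files (compare the "Fid.Vnode & 1" test in
 * afs_InvalidateAllSegments below), so afs_DisconVnode is kept even and
 * the shadow dir gets afs_DisconVnode + 1. Stepping by 2, and wrapping
 * back to 2 on overflow, preserves that parity. For example, with
 * afs_DisconVnode == 4 and a largest uniquifier of 7 in the hash chain,
 * the generated shadow fid is Vnode 5, Unique 8.
 */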
/*
 * afs_TruncateAllSegments
 *
 * Description:
 *	Truncate a cache file.
 *
 * Parameters:
 *	avc  : Ptr to vcache entry to truncate.
 *	alen : Number of bytes to make the file.
 *	areq : Ptr to request structure.
 *
 * Environment:
 *	Called with avc write-locked; in VFS40 systems, pvnLock is also
 *	held.
 */
int
afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
			struct vrequest *areq, afs_ucred_t *acred)
{
    struct dcache *tdc;
    afs_int32 code;
    afs_int32 index;
    afs_size_t newSize;

    int dcCount, dcPos;
    struct dcache **tdcArray = NULL;

    AFS_STATCNT(afs_TruncateAllSegments);
    avc->f.m.Date = osi_Time();
    afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
    if (alen >= avc->f.m.Length) {
	/*
	 * Special speedup since Sun's vm extends the file this way;
	 * we've never written to the file thus we can just set the new
	 * length and avoid the needless calls below.
	 * Also used for ftruncate calls which can extend the file.
	 * To completely minimize the possible extra StoreMini RPC, we really
	 * should keep the ExtendedPos as well and clear this flag if we
	 * truncate below that value before we store the file back.
	 */
	avc->f.states |= CExtendedFile;
	avc->f.m.Length = alen;
	return 0;
    }
#if (defined(AFS_SUN5_ENV))
    /* Zero unused portion of last page */
    osi_VM_PreTruncate(avc, alen, acred);
#endif

#if (defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 546);
    avc->activeV++;		/* Block new getpages */
    ReleaseWriteLock(&avc->vlock);
#endif

    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();

    /* Flush pages beyond end-of-file. */
    osi_VM_Truncate(avc, alen, acred);

    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 79);

    avc->f.m.Length = alen;

    if (alen < avc->f.truncPos)
	avc->f.truncPos = alen;
    code = DVHash(&avc->f.fid);

    /* block out others from screwing with this table */
    ObtainWriteLock(&afs_xdcache, 287);

    dcCount = 0;
    for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		ReleaseWriteLock(&afs_xdcache);
		code = EIO;
		goto done;
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcCount++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }

    /* Now allocate space where we can save those dcache entries, and
     * do a second pass over them.  Since we're holding xdcache, it
     * shouldn't be changing.
     */
    tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
    dcPos = 0;

    for (index = afs_dvhashTbl[code]; index != NULLIDX;
	 index = afs_dvnextTbl[index]) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* make sure we put back all of the tdcArray members before
		 * bailing out */
		/* remember, the last valid tdc is at dcPos-1, so start at
		 * dcPos-1, not at dcPos itself. */
		for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
		    tdc = tdcArray[dcPos];
		    afs_PutDCache(tdc);
		}
		code = EIO;
		goto done;
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file, and modified, we'll store it back */
		if (dcPos < dcCount) {
		    tdcArray[dcPos++] = tdc;
		} else {
		    afs_PutDCache(tdc);
		}
	    } else {
		afs_PutDCache(tdc);
	    }
	}
    }

    ReleaseWriteLock(&afs_xdcache);

    /* Now we loop over the array of dcache entries and truncate them */
    for (index = 0; index < dcPos; index++) {
	struct osi_file *tfile;

	tdc = tdcArray[index];

	newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
	if (newSize < 0)
	    newSize = 0;
	ObtainSharedLock(&tdc->lock, 672);
	if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
	    UpgradeSToWLock(&tdc->lock, 673);
	    tdc->f.states |= DWriting;
	    tfile = afs_CFileOpen(&tdc->f.inode);
	    afs_CFileTruncate(tfile, (afs_int32)newSize);
	    afs_CFileClose(tfile);
	    afs_AdjustSize(tdc, (afs_int32)newSize);
	    if (alen < tdc->validPos) {
		if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
		    tdc->validPos = 0;
		else
		    tdc->validPos = alen;
	    }
	    ConvertWToSLock(&tdc->lock);
	}
	ReleaseSharedLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    code = 0;

 done:
    if (tdcArray) {
	osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
    }
#if (defined(AFS_SUN5_ENV))
    ObtainWriteLock(&avc->vlock, 547);
    if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
	avc->vstates &= ~VRevokeWait;
	afs_osi_Wakeup((char *)&avc->vstates);
    }
    ReleaseWriteLock(&avc->vlock);
#endif
    return code;
}
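/*
 * Worked example for the per-chunk arithmetic above (assuming 64 KiB
 * chunks for illustration; the real chunk size is configurable):
 * truncating to alen = 100000 gives
 *   chunk 0 (base 0):          newSize = 100000, >= chunkBytes, untouched;
 *   chunk 1 (base 65536):      newSize = 34464, chunk file cut to that;
 *   chunk 2+ (base >= 131072): newSize < 0, clamped to 0, chunk emptied.
 * validPos is pulled back to alen (or to 0 for chunks wholly past the
 * new end) so later reads won't treat discarded bytes as valid cached
 * data.
 */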
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;	/* don't truncate later */
    avc->f.states &= ~CExtendedFile;	/* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
	osi_dnlc_purgedp(avc);

    /* Blow away pages; for now, only for Solaris */
#if (defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
	osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* In the case of fatal errors during stores, we MUST
		 * invalidate all of the relevant chunks. Otherwise, the chunks
		 * will be left with the 'new' data that was never successfully
		 * written to the server, but the DV in the dcache is still the
		 * old DV. So, we may indefinitely serve data to applications
		 * that is not actually in the file on the fileserver. If we
		 * cannot afs_GetValidDSlot the appropriate entries, currently
		 * there is no way to ensure the dcache is invalidated. So for
		 * now, to avoid risking serving bad data from the cache, panic
		 * instead. */
		osi_Panic("afs_InvalidateAllSegments tdc count");
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid))
		dcListMax++;
	    afs_PutDCache(tdc);
	}
	index = afs_dvnextTbl[index];
    }
    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    tdc = afs_GetValidDSlot(index);
	    if (!tdc) {
		/* We cannot proceed after getting this error; we risk serving
		 * incorrect data to applications. So panic instead. See the
		 * above comment next to the previous afs_GetValidDSlot call
		 * for details. */
		osi_Panic("afs_InvalidateAllSegments tdc store");
	    }
	    ReleaseReadLock(&tdc->tlock);
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
		/* same file? we'll zap it */
		if (afs_indexFlags[index] & IFDataMod) {
		    afs_stats_cmperf.cacheCurrDirtyChunks--;
		    /* don't write it back */
		    afs_indexFlags[index] &= ~IFDataMod;
		}
		afs_indexFlags[index] &= ~IFAnyPages;
		if (dcListCount < dcListMax)
		    dcList[dcListCount++] = tdc;
		else
		    afs_PutDCache(tdc);
	    } else {
		afs_PutDCache(tdc);
	    }
	}
	index = afs_dvnextTbl[index];
    }

    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
	tdc = dcList[i];

	ObtainWriteLock(&tdc->lock, 679);
	ZapDCE(tdc);
	if (vType(avc) == VDIR)
	    DZap(tdc);
	ReleaseWriteLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
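/*
 * The count-allocate-collect shape of the two hash walks above recurs in
 * this file (compare afs_TruncateAllSegments): pass one counts matching
 * chunks under afs_xdcache, pass two stashes referenced dcaches in the
 * array, and the destructive work (ZapDCE/DZap) runs only after
 * afs_xdcache is dropped, since tdc->lock may not be taken while holding
 * afs_xdcache without risking lock-order deadlocks (see the DCLOCKXXX
 * note in afs_StoreAllSegments below).
 */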
int
afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq, int sync)
{
    struct dcache *tdc;
    afs_int32 code = 0;
    afs_int32 index;
    afs_int32 origCBs, foreign = 0;
    int hash;
    afs_hyper_t newDV, oldDV;	/* DV when we start, and finish, respectively */
    struct dcache **dcList;
    unsigned int i, j, minj, moredata, high, off;
    afs_size_t maxStoredLength;	/* highest offset we've written to server. */
    int safety, marineronce = 0;

    AFS_STATCNT(afs_StoreAllSegments);

    hset(oldDV, avc->f.m.DataVersion);
    hset(newDV, avc->f.m.DataVersion);
    hash = DVHash(&avc->f.fid);
    foreign = (avc->f.states & CForeign);
    dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
#if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
    /* In the aix vm implementation we need to do the vm_writep even
     * on the memcache case since that's where we adjust the file's size
     * and finish flushing partial vm pages.
     */
    if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) ||
	(sync & AFS_VMSYNC_INVAL) || (sync & AFS_VMSYNC) ||
	(sync & AFS_LASTSTORE))
#endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
    {
	/* If we're not diskless, reading a file may stress the VM
	 * system enough to cause a pageout, and this vnode would be
	 * locked when the pageout occurs.  We can prevent this problem
	 * by making sure all dirty pages are already flushed.  We don't
	 * do this when diskless because reading a diskless (i.e.
	 * memory-resident) chunk doesn't require using new VM, and we
	 * also don't want to dump more dirty data into a diskless cache,
	 * since they're smaller, and we might exceed its available
	 * space.
	 */
#if defined(AFS_SUN5_ENV)
	if (sync & AFS_VMSYNC_INVAL)	/* invalidate VM pages */
	    osi_VM_TryToSmush(avc, CRED(), 1);
	else
#endif
	    osi_VM_StoreAllSegments(avc);
    }
    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
	/* This will probably make someone sad ... */
	/*printf("Net down in afs_StoreSegments\n");*/
	return ENETDOWN;
    }

    ConvertWToSLock(&avc->lock);

    /*
     * Subsequent code expects a sorted list, and it expects all the
     * chunks in the list to be contiguous, so we need a sort and a
     * while loop in here, too - but this will work for a first pass...
     * 92.10.05 - OK, there's a sort in here now.  It's kind of a modified
     *            bin sort, I guess.  Chunk numbers start with 0
     *
     * - Have to get a write lock on xdcache because GetDSlot might need it (if
     *   the chunk doesn't have a dcache struct).
     *   This seems like overkill in most cases.
     * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
     *   xdcache, then relock xdcache and try to use index.  It is done
     *   a lot elsewhere in the CM, but I'm not buying that argument.
     * - should be able to check IFDataMod without doing the GetDSlot (just
     *   hold afs_xdcache).  That way, it's easy to do this without the
     *   writelock on afs_xdcache, and we save unnecessary disk
     *   operations. I don't think that works, 'cuz the next pointers
     *   are still on disk.
     */
    origCBs = afs_allCBs;

    maxStoredLength = 0;
    minj = 0;

    do {
	memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
	high = 0;
	moredata = FALSE;

	/* lock and start over from beginning of hash chain
	 * in order to avoid a race condition. */
	ObtainWriteLock(&afs_xdcache, 284);
	index = afs_dvhashTbl[hash];

	for (j = 0; index != NULLIDX;) {
	    if ((afs_indexFlags[index] & IFDataMod)
		&& (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
		tdc = afs_GetValidDSlot(index);	/* refcount+1. */
		if (!tdc) {
		    ReleaseWriteLock(&afs_xdcache);
		    code = EIO;
		    goto done;
		}
		ReleaseReadLock(&tdc->tlock);
		if (!FidCmp(&tdc->f.fid, &avc->f.fid) && tdc->f.chunk >= minj) {
		    off = tdc->f.chunk - minj;
		    if (off < NCHUNKSATONCE) {
			if (dcList[off])
			    osi_Panic("dclist slot already in use!");
			if (afs_mariner && !marineronce) {
			    /* first chunk only */
			    afs_MarinerLog("store$Storing", avc);
			    marineronce++;
			}
			dcList[off] = tdc;
			if (off > high)
			    high = off;
			j++;
			/* DCLOCKXXX: chunkBytes is protected by tdc->lock which we
			 * can't grab here, due to lock ordering with afs_xdcache.
			 * So, disable this shortcut for now.  -- kolya 2001-10-13 */
			/* shortcut: big win for little files */
			/* tlen -= tdc->f.chunkBytes;
			 * if (tlen <= 0)
			 *    break;
			 */
		    } else {
			moredata = TRUE;
			afs_PutDCache(tdc);
			if (j == NCHUNKSATONCE)
			    break;
		    }
		} else {
		    afs_PutDCache(tdc);
		}
	    }
	    index = afs_dvnextTbl[index];
	}
	ReleaseWriteLock(&afs_xdcache);

	/* this guy writes chunks, puts back dcache structs, and bumps newDV */
	/* "moredata" just says "there are more dirty chunks yet to come". */
	if (j) {
	    code = afs_CacheStoreVCache(dcList, avc, areq, sync,
					minj, high, moredata,
					&newDV, &maxStoredLength);
	    /* Release any zero-length dcache entries in our interval
	     * that we locked but didn't store back above. */
	    for (j = 0; j <= high; j++) {
		tdc = dcList[j];
		if (tdc) {
		    osi_Assert(tdc->f.chunkBytes == 0);
		    ReleaseSharedLock(&tdc->lock);
		    afs_PutDCache(tdc);
		}
	    }
	}
	/* if (j) */
	minj += NCHUNKSATONCE;
    } while (!code && moredata);

 done:
    UpgradeSToWLock(&avc->lock, 29);

    /* send a trivial truncation store if did nothing else */
    if (code == 0) {
	/*
	 * Call StoreMini if we haven't written enough data to extend the
	 * file at the fileserver to the client's notion of the file length.
	 */
	if ((avc->f.truncPos != AFS_NOTRUNC)
	    || ((avc->f.states & CExtendedFile)
		&& (maxStoredLength < avc->f.m.Length))) {
	    code = afs_StoreMini(avc, areq);
	    if (code == 0)
		hadd32(newDV, 1);	/* just bumped here, too */
	}
	avc->f.states &= ~CExtendedFile;
    }

    /*
     * Finally, turn off DWriting, turn on DFEntryMod,
     * update f.versionNo.
     * A lot of this could be integrated into the loop above
     */
    if (!code) {
	afs_hyper_t h_unset;
	hones(h_unset);

	minj = 0;

	do {
	    moredata = FALSE;
	    memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));

	    /* overkill, but it gets the lock in case GetDSlot needs it */
	    ObtainWriteLock(&afs_xdcache, 285);

	    for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
		 index != NULLIDX && safety < afs_cacheFiles + 2;
		 index = afs_dvnextTbl[index]) {

		if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
		    tdc = afs_GetValidDSlot(index);
		    if (!tdc) {
			/* This is okay, since manipulating the dcaches at this
			 * point is best-effort. We only get a dcache here to
			 * increment the dv and turn off DWriting. If we were
			 * supposed to do that for a dcache, but could not
			 * due to an I/O error, it just means the dv won't
			 * be updated, so we won't be able to use that cached
			 * chunk in the future. That's inefficient, but not
			 * an error. */
			continue;
		    }
		    ReleaseReadLock(&tdc->tlock);

		    if (!FidCmp(&tdc->f.fid, &avc->f.fid)
			&& tdc->f.chunk >= minj) {
			off = tdc->f.chunk - minj;
			if (off < NCHUNKSATONCE) {
			    /* this is the file, and the correct chunk range */
			    if (j >= NCHUNKSATONCE)
				osi_Panic("Too many dcache entries in range\n");
			    dcList[j++] = tdc;
			} else {
			    moredata = TRUE;
			    afs_PutDCache(tdc);
			    if (j == NCHUNKSATONCE)
				break;
			}
		    } else {
			afs_PutDCache(tdc);
		    }
		}
	    }
	    ReleaseWriteLock(&afs_xdcache);

	    for (i = 0; i < j; i++) {
		/* Iterate over the dcache entries we collected above */
		tdc = dcList[i];
		ObtainSharedLock(&tdc->lock, 677);

		/* was code here to clear IFDataMod, but it should only be done
		 * in storedcache and storealldcache.
		 */
		/* Only increase DV if we had up-to-date data to start with.
		 * Otherwise, we could be falsely upgrading an old chunk
		 * (that we never read) into one labelled with the current
		 * DV #.  Also note that we check that no intervening stores
		 * occurred, otherwise we might mislabel cache information
		 * for a chunk that we didn't store this time
		 */
		/* Don't update the version number if it's not yet set. */
		if (!hsame(tdc->f.versionNo, h_unset)
		    && hcmp(tdc->f.versionNo, oldDV) >= 0) {

		    if ((!(afs_dvhack || foreign)
			 && hsame(avc->f.m.DataVersion, newDV))
			|| ((afs_dvhack || foreign)
			    && (origCBs == afs_allCBs))) {
			/* no error, this is the DV */

			UpgradeSToWLock(&tdc->lock, 678);
			hset(tdc->f.versionNo, avc->f.m.DataVersion);
			tdc->dflags |= DFEntryMod;
			/* DWriting may not have gotten cleared above, if all
			 * we did was a StoreMini */
			tdc->f.states &= ~DWriting;
			ConvertWToSLock(&tdc->lock);
		    }
		}

		ReleaseSharedLock(&tdc->lock);
		afs_PutDCache(tdc);
	    }

	    minj += NCHUNKSATONCE;

	} while (moredata);
    }

    if (code) {
	/*
	 * Invalidate chunks after an error for ccores files since
	 * afs_inactive won't be called for these and they won't be
	 * invalidated. Also discard data if it's a permanent error from the
	 * fileserver.
	 */
	if (areq->permWriteError || (avc->f.states & CCore)) {
	    afs_InvalidateAllSegments(avc);
	}
    }
    afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
    /* would like a Trace5, but it doesn't exist... */
    afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->lock.wait_states,
	       ICL_TYPE_INT32, avc->lock.excl_locked);
    afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
	       ICL_TYPE_INT32, avc->lock.wait_states,
	       ICL_TYPE_INT32, avc->lock.readers_reading,
	       ICL_TYPE_INT32, avc->lock.num_waiting);

    /*
     * Finally, if updated DataVersion matches newDV, we did all of the
     * stores.  If mapDV indicates that the page cache was flushed up
     * to when we started the store, then we can relabel them as flushed
     * as recently as newDV.
     * Turn off CDirty bit because the stored data is now in sync with server.
     */
    if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
	if ((!(afs_dvhack || foreign)
	     && hsame(avc->f.m.DataVersion, newDV))
	    || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
	    hset(avc->mapDV, newDV);
	    avc->f.states &= ~CDirty;
	}
    }

    osi_FreeLargeSpace(dcList);

    /* If this is not the final write, a temporary error is ok. */
    if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
	code = 0;

    return code;
}				/*afs_StoreAllSegments (new 03/02/94) */
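/*
 * Sketch of the windowing above (NCHUNKSATONCE entries per pass; assume a
 * window of 64 for illustration): with dirty chunks 0..70, pass one
 * (minj = 0) stores chunks 0..63 and sets moredata because chunks 64..70
 * fall outside [minj, minj + 64); pass two (minj = 64) stores the rest
 * with moredata left clear, ending the loop. Within a pass,
 * dcList[tdc->f.chunk - minj] is the bin-sorted slot for each chunk.
 */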
/*!
 * Handles all the reconnection details:
 * - Get all the details about the vnode: name, fid, and parent dir fid.
 * - Send data to server.
 * - Handle errors.
 * - Reorder vhash and dcaches in their hashes, using the newly acquired fid.
 */
int
afs_ProcessOpCreate(struct vcache *avc, struct vrequest *areq,
		    afs_ucred_t *acred)
{
    char *tname = NULL, *ttargetName = NULL;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus OutFidStatus, OutDirStatus;
    struct VenusFid pdir_fid, newFid;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct vcache *tdp = NULL, *tvc = NULL;
    struct dcache *tdc = NULL;
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    afs_int32 hash, new_hash, index;
    afs_size_t tlen;
    int code, op = 0;
    XSTATS_DECLS;

    tname = afs_osi_Alloc(AFSNAMEMAX);
    if (!tname)
	return ENOMEM;

    code = afs_GetParentVCache(avc, 0, &pdir_fid, tname, &tdp);
    if (code)
	goto end;

    /* This data may also be in linkData, but then we have to deal with
     * the joy of terminating NULLs and . and file modes. So just get
     * it from the dcache where it won't have been fiddled with.
     */
    if (vType(avc) == VLNK) {
	afs_size_t offset;
	struct dcache *tdc;
	struct osi_file *tfile;

	tdc = afs_GetDCache(avc, 0, areq, &offset, &tlen, 0);
	if (!tdc) {
	    code = ENOENT;
	    goto end;
	}

	if (tlen > 1024) {
	    afs_PutDCache(tdc);
	    code = EFAULT;
	    goto end;
	}

	tlen++;			/* space for NULL */
	ttargetName = afs_osi_Alloc(tlen);
	if (!ttargetName) {
	    afs_PutDCache(tdc);
	    code = ENOMEM;	/* go through 'end' so tname is freed */
	    goto end;
	}
	ObtainReadLock(&tdc->lock);
	tfile = afs_CFileOpen(&tdc->f.inode);
	code = afs_CFileRead(tfile, 0, ttargetName, tlen);
	ttargetName[tlen - 1] = '\0';
	afs_CFileClose(tfile);
	ReleaseReadLock(&tdc->lock);
	afs_PutDCache(tdc);
    }

    /* Set status. */
    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE | AFS_SETGROUP;
    InStatus.ClientModTime = avc->f.m.Date;
    InStatus.Owner = avc->f.m.Owner;
    InStatus.Group = (afs_int32) afs_cr_gid(acred);
    /* Only care about protection bits. */
    InStatus.UnixModeBits = avc->f.m.Mode & 0xffff;

    do {
	tc = afs_Conn(&tdp->f.fid, areq, SHARED_LOCK, &rxconn);
	if (tc) {
	    switch (vType(avc)) {
	    case VREG:
		/* Make file on server. */
		op = AFS_STATS_FS_RPCIDX_CREATEFILE;
		XSTATS_START_TIME(op);
		RX_AFS_GUNLOCK();
		/* use rxconn like the other cases below; tc->id here
		 * predated the rxconn interface */
		code = RXAFS_CreateFile(rxconn,
					(struct AFSFid *)&tdp->f.fid.Fid,
					tname, &InStatus,
					(struct AFSFid *)&newFid.Fid,
					&OutFidStatus, &OutDirStatus,
					&CallBack, &tsync);
		RX_AFS_GLOCK();
		XSTATS_END_TIME;
		break;
	    case VDIR:
		/* Make dir on server. */
		op = AFS_STATS_FS_RPCIDX_MAKEDIR;
		XSTATS_START_TIME(op);
		RX_AFS_GUNLOCK();
		code = RXAFS_MakeDir(rxconn,
				     (struct AFSFid *)&tdp->f.fid.Fid,
				     tname, &InStatus,
				     (struct AFSFid *)&newFid.Fid,
				     &OutFidStatus, &OutDirStatus,
				     &CallBack, &tsync);
		RX_AFS_GLOCK();
		XSTATS_END_TIME;
		break;
	    case VLNK:
		/* Make symlink on server. */
		op = AFS_STATS_FS_RPCIDX_SYMLINK;
		XSTATS_START_TIME(op);
		RX_AFS_GUNLOCK();
		code = RXAFS_Symlink(rxconn,
				     (struct AFSFid *)&tdp->f.fid.Fid,
				     tname, ttargetName, &InStatus,
				     (struct AFSFid *)&newFid.Fid,
				     &OutFidStatus, &OutDirStatus, &tsync);
		RX_AFS_GLOCK();
		XSTATS_END_TIME;
		break;
	    default:
		op = AFS_STATS_FS_RPCIDX_CREATEFILE;
		code = 1;
		break;
	    }
	} else
	    code = -1;
    } while (afs_Analyze(tc, rxconn, code, &tdp->f.fid, areq, op,
			 SHARED_LOCK, NULL));

    /* TODO: Handle errors. */
    if (code) {
	/* printf("afs_ProcessOpCreate: error while creating vnode on server, code=%d .\n", code); */
	goto end;
    }

    /* The rpc doesn't set the cell number. */
    newFid.Cell = avc->f.fid.Cell;

    /*
     * Change the fid in the dir entry.
     */

    /* Seek the dir's dcache. */
    tdc = afs_FindDCacheByFid(&tdp->f.fid);
    if (tdc) {
	/* And now change the fid in the parent dir entry. */
	afs_dir_ChangeFid(tdc, tname, &avc->f.fid.Fid.Vnode,
			  &newFid.Fid.Vnode);
	afs_PutDCache(tdc);
    }

    if (vType(avc) == VDIR) {
	/* Change fid in the dir for the "." entry. ".." has already been
	 * handled by afs_FixChildrenFids when processing the parent dir.
	 */
	tdc = afs_FindDCacheByFid(&avc->f.fid);
	if (tdc) {
	    afs_dir_ChangeFid(tdc, ".", &avc->f.fid.Fid.Vnode,
			      &newFid.Fid.Vnode);

	    if (avc->f.m.LinkCount >= 2)
		/* For non-empty dirs, fix children's parentVnode and
		 * parentUnique reference.
		 */
		afs_FixChildrenFids(&avc->f.fid, &newFid);

	    afs_PutDCache(tdc);
	}
    }

    /* Recompute hash chain positions for vnode and dcaches.
     * Then change to the new FID.
     */

    /* The vcache goes first. */
    ObtainWriteLock(&afs_xvcache, 735);

    /* Old fid hash. */
    hash = VCHash(&avc->f.fid);
    /* New fid hash. */
    new_hash = VCHash(&newFid);

    /* Remove hash from old position. */
    /* XXX: not checking array element contents. It shouldn't be empty.
     * If it oopses, then something else might be wrong.
     */
    if (afs_vhashT[hash] == avc) {
	/* First in hash chain (might be the only one). */
	afs_vhashT[hash] = avc->hnext;
    } else {
	/* More elements in hash chain. */
	for (tvc = afs_vhashT[hash]; tvc; tvc = tvc->hnext) {
	    if (tvc->hnext == avc) {
		tvc->hnext = avc->hnext;
		break;
	    }
	}
    }				/* if (!afs_vhashT[i]->hnext) */
    QRemove(&avc->vhashq);

    /* Insert hash in new position. */
    avc->hnext = afs_vhashT[new_hash];
    afs_vhashT[new_hash] = avc;
    QAdd(&afs_vhashTV[VCHashV(&newFid)], &avc->vhashq);

    ReleaseWriteLock(&afs_xvcache);

    /* Do the same thing for all dcaches. */
    hash = DVHash(&avc->f.fid);
    ObtainWriteLock(&afs_xdcache, 743);
    for (index = afs_dvhashTbl[hash]; index != NULLIDX; index = hash) {
	hash = afs_dvnextTbl[index];
	tdc = afs_GetValidDSlot(index);
	if (!tdc)
	    continue;		/* don't dereference a failed slot lookup */
	ReleaseReadLock(&tdc->tlock);
	if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
	    if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {

		/* Safer but slower. */
		afs_HashOutDCache(tdc, 0);

		/* Put dcache in new positions in the dchash and dvhash. */
		new_hash = DCHash(&newFid, tdc->f.chunk);
		afs_dcnextTbl[tdc->index] = afs_dchashTbl[new_hash];
		afs_dchashTbl[new_hash] = tdc->index;

		new_hash = DVHash(&newFid);
		afs_dvnextTbl[tdc->index] = afs_dvhashTbl[new_hash];
		afs_dvhashTbl[new_hash] = tdc->index;

		afs_indexUnique[tdc->index] = newFid.Fid.Unique;
		memcpy(&tdc->f.fid, &newFid, sizeof(struct VenusFid));
	    }			/* if fid match */
	}			/* if uniquifier match */
	afs_PutDCache(tdc);
    }				/* for all dcaches in this hash bucket */
    ReleaseWriteLock(&afs_xdcache);

    /* Now we can set the new fid. */
    memcpy(&avc->f.fid, &newFid, sizeof(struct VenusFid));

 end:
    if (tdp)
	afs_PutVCache(tdp);
    afs_osi_Free(tname, AFSNAMEMAX);
    if (ttargetName)
	afs_osi_Free(ttargetName, tlen);
    return code;
}
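/*
 * Minimal sketch of the rekeying done above, assuming a fake fid F_old
 * that was assigned while disconnected and a server-issued F_new: the
 * vcache moves from chain VCHash(F_old) to VCHash(F_new), and each of
 * the file's dcaches is unthreaded with afs_HashOutDCache and rethreaded
 * at DCHash(F_new, chunk) and DVHash(F_new), so later lookups by the
 * real fid (e.g. via afs_FindDCacheByFid) find the same cached data.
 */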