/*!
 * \brief Query the AFSDB handler and wait for its response.
 *
 * Publishes a single cell-lookup request in the shared afsdb_req slot,
 * wakes the AFSDB handler, and sleeps until the handler marks the
 * request complete (or the handler shuts down).
 *
 * \param acellName  Name of the cell to resolve.
 * \return 0 for success; ENOENT if the handler is not running or the
 *         lookup failed.  (The original header claimed "< 0 is error",
 *         but the code returns the positive errno ENOENT.)
 */
static int afs_GetCellHostsAFSDB(char *acellName)
{
    AFS_ASSERT_GLOCK();
    if (!afsdb_handler_running)
        return ENOENT;

    /* afsdb_client_lock serializes callers: only one request may occupy
     * the shared afsdb_req slot at a time. */
    ObtainWriteLock(&afsdb_client_lock, 685);
    ObtainWriteLock(&afsdb_req_lock, 686);

    /* Fill in the request and wake the handler thread. */
    afsdb_req.cellname = acellName;
    afsdb_req.complete = 0;
    afsdb_req.pending = 1;

    afs_osi_Wakeup(&afsdb_req);
    /* Downgrade: we only read the request state while waiting. */
    ConvertWToRLock(&afsdb_req_lock);

    /* Sleep until the handler completes the request or goes away.
     * The read lock is dropped across each sleep and re-taken to
     * re-check the condition. */
    while (afsdb_handler_running && !afsdb_req.complete) {
        ReleaseReadLock(&afsdb_req_lock);
        afs_osi_Sleep(&afsdb_req);
        ObtainReadLock(&afsdb_req_lock);
    };				/* NOTE(review): stray empty statement, harmless */

    ReleaseReadLock(&afsdb_req_lock);
    ReleaseWriteLock(&afsdb_client_lock);

    /* A non-NULL cellname is treated as success -- presumably the
     * handler clears it on failure.  NOTE(review): this field is read
     * after both locks are dropped; apparently safe because
     * afsdb_client_lock still serializes clients -- confirm. */
    if (afsdb_req.cellname) {
        return 0;
    } else
        return ENOENT;
}
/*
 * canWrite
 *
 * Block the dump writer until the reader has drained the pipe
 * (ds_bytes reaches 0).  While waiting, wake a waiting reader if there
 * is one, mark ourselves DS_WAITING, drop ds_lock, and sleep until the
 * reader signals us.
 *
 * \param fid  Unused in this function body.
 * \return Always 1.
 *
 * NOTE: returns with dumpSyncPtr->ds_lock still write-held -- the
 * caller is responsible for releasing it.
 */
afs_int32
canWrite(int fid)
{
#ifndef AFS_PTHREAD_ENV
    afs_int32 code = 0;
#endif
    extern dumpSyncP dumpSyncPtr;

    ObtainWriteLock(&dumpSyncPtr->ds_lock);

    /* let the pipe drain */
    while (dumpSyncPtr->ds_bytes > 0) {
        /* If the reader is parked, wake it so it can consume bytes. */
        if (dumpSyncPtr->ds_readerStatus == DS_WAITING) {
            dumpSyncPtr->ds_readerStatus = 0;
#ifdef AFS_PTHREAD_ENV
            CV_BROADCAST(&dumpSyncPtr->ds_readerStatus_cond);
#else
            code = LWP_SignalProcess(&dumpSyncPtr->ds_readerStatus);
            if (code)
                LogError(code, "canWrite: Signal delivery failed\n");
#endif
        }
        /* Park ourselves until the reader signals the writer.  ds_lock
         * must be dropped before sleeping and re-taken afterwards. */
        dumpSyncPtr->ds_writerStatus = DS_WAITING;
        ReleaseWriteLock(&dumpSyncPtr->ds_lock);
#ifdef AFS_PTHREAD_ENV
        MUTEX_ENTER(&dumpSyncPtr->ds_writerStatus_mutex);
        CV_WAIT(&dumpSyncPtr->ds_writerStatus_cond,
                &dumpSyncPtr->ds_writerStatus_mutex);
        MUTEX_EXIT(&dumpSyncPtr->ds_writerStatus_mutex);
#else
        LWP_WaitProcess(&dumpSyncPtr->ds_writerStatus);
#endif
        ObtainWriteLock(&dumpSyncPtr->ds_lock);
    }
    return (1);
}
/*
 * DNew
 *
 * Allocate a fresh directory buffer page for (adc, page) without
 * reading any existing contents from disk, extending the chunk on
 * disk if the new page lies beyond its current size.
 *
 * \param adc    dcache entry the page belongs to; caller holds
 *               adc->lock write-locked (see comment below).
 * \param page   Page index within the chunk.
 * \param entry  Out: receives the buffer pointer and its data area.
 * \return 0 on success, EIO if no buffer slot could be obtained.
 *
 * On success the buffer's lockers count has been incremented;
 * presumably the caller releases it via the usual DRelease path --
 * confirm against callers.
 */
int
DNew(struct dcache *adc, int page, struct DirBuffer *entry)
{
    /* Same as read, only do *not* even try to read the page, since it
     * probably doesn't exist. */
    struct buffer *tb;
    AFS_STATCNT(DNew);

    ObtainWriteLock(&afs_bufferLock, 264);
    if ((tb = afs_newslot(adc, page, NULL)) == 0) {
        ReleaseWriteLock(&afs_bufferLock);
        return EIO;
    }
    /* extend the chunk, if needed */
    /* Do it now, not in DFlush or afs_newslot when the data is written out,
     * since now our caller has adc->lock writelocked, and we can't acquire
     * that lock (or even map from a fid to a dcache) in afs_newslot or
     * DFlush due to lock hierarchy issues */
    if ((page + 1) * AFS_BUFFER_PAGESIZE > adc->f.chunkBytes) {
        afs_AdjustSize(adc, (page + 1) * AFS_BUFFER_PAGESIZE);
        osi_Assert(afs_WriteDCache(adc, 1) == 0);
    }
    /* Pin the buffer (lockers++) while still under afs_bufferLock so it
     * cannot be recycled, then drop both locks before returning. */
    ObtainWriteLock(&tb->lock, 265);
    tb->lockers++;
    ReleaseWriteLock(&afs_bufferLock);
    ReleaseWriteLock(&tb->lock);
    entry->buffer = tb;
    entry->data = tb->data;
    return 0;
}
static void deadlock_write (void) { struct Lock lock; Lock_Init (&lock); ObtainWriteLock(&lock); ObtainWriteLock(&lock); }
/*
 * This is almost exactly like the PFlush() routine in afs_pioctl.c,
 * but that routine is static. We are about to change a file from
 * bypassing caching to normal caching.  Therefore, we want to
 * throw out any existing VM pages for the file.  We keep track of
 * the number of times we go back and forth from caching to bypass.
 *
 * \param avc     vcache to transition; may be NULL (no-op).
 * \param acred   Credentials used for the smush.
 * \param aflags  TRANSChangeDesiredBit clears FCSDesireBypass;
 *                TRANSSetManualBit sets FCSManuallySet.
 */
void
afs_TransitionToCaching(struct vcache *avc,
                        afs_ucred_t *acred,
                        int aflags)
{
    int resetDesire = 0;
    int setManual = 0;

    if (!avc)
        return;

    if (aflags & TRANSChangeDesiredBit)
        resetDesire = 1;
    if (aflags & TRANSSetManualBit)
        setManual = 1;

#ifdef AFS_BOZONLOCK_ENV
    afs_BozonLock(&avc->pvnLock, avc);	/* Since afs_TryToSmush will do a pvn_vptrunc */
#else
    AFS_GLOCK();
#endif
    ObtainWriteLock(&avc->lock, 926);
    /*
     * Someone may have beat us to doing the transition - we had no lock
     * when we checked the flag earlier.  No cause to panic, just return.
     */
    if (!(avc->cachingStates & FCSBypass))
        goto done;

    /* Ok, we actually do need to flush */
    /* Drop the callback and force a re-stat on next reference. */
    ObtainWriteLock(&afs_xcbhash, 957);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);	/* next reference will re-stat cache entry */
    ReleaseWriteLock(&afs_xcbhash);
    /* now find the disk cache entries */
    afs_TryToSmush(avc, acred, 1);
    osi_dnlc_purgedp(avc);
    if (avc->linkData && !(avc->f.states & CCore)) {
        /* Toss the cached symlink target. */
        afs_osi_Free(avc->linkData, strlen(avc->linkData) + 1);
        avc->linkData = NULL;
    }

    avc->cachingStates &= ~(FCSBypass);	/* Reset the bypass flag */
    if (resetDesire)
        avc->cachingStates &= ~(FCSDesireBypass);
    if (setManual)
        avc->cachingStates |= FCSManuallySet;
    avc->cachingTransitions++;

  done:
    ReleaseWriteLock(&avc->lock);
#ifdef AFS_BOZONLOCK_ENV
    afs_BozonUnlock(&avc->pvnLock, avc);
#else
    AFS_GUNLOCK();
#endif
}
/*
 * afs_BQueue
 *
 * Allocate and fill a background-daemon request slot from the fixed
 * afs_brs[] table, waking a daemon if any are waiting for work.
 *
 * \param aopcode   Operation code for the request.
 * \param avc       Optional vcache; gets an extra hold if non-NULL.
 * \param dontwait  If non-zero, return NULL instead of sleeping when
 *                  no slot is free.
 * \param ause      Extra use count; slot refCount becomes ause + 1.
 * \param acred     Optional credentials; held via crhold if non-NULL.
 * \param asparm0/asparm1, apparm0..apparm2  Opaque size/pointer
 *                  parameters stored in the request.
 * \return The filled request slot, or NULL (dontwait only).
 */
struct brequest *
afs_BQueue(short aopcode, struct vcache *avc, afs_int32 dontwait,
           afs_int32 ause, afs_ucred_t *acred, afs_size_t asparm0,
           afs_size_t asparm1, void *apparm0, void *apparm1,
           void *apparm2)
{
    int i;
    struct brequest *tb;

    AFS_STATCNT(afs_BQueue);
    ObtainWriteLock(&afs_xbrs, 296);
    while (1) {
        /* Linear scan for a free slot (refCount == 0). */
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->refCount == 0)
                break;
        }
        if (i < NBRS) {
            /* found a buffer */
            tb->opcode = aopcode;
            tb->vc = avc;
            tb->cred = acred;
            if (tb->cred) {
                crhold(tb->cred);
            }
            if (avc) {
                AFS_FAST_HOLD(avc);
            }
            tb->refCount = ause + 1;
            tb->size_parm[0] = asparm0;
            tb->size_parm[1] = asparm1;
            tb->ptr_parm[0] = apparm0;
            tb->ptr_parm[1] = apparm1;
            tb->ptr_parm[2] = apparm2;
            tb->flags = 0;
            tb->code_raw = tb->code_checkcode = 0;
            tb->ts = afs_brs_count++;
            /* if daemons are waiting for work, wake them up */
            if (afs_brsDaemons > 0) {
                afs_osi_Wakeup(&afs_brsDaemons);
            }
            ReleaseWriteLock(&afs_xbrs);
            return tb;
        }
        if (dontwait) {
            ReleaseWriteLock(&afs_xbrs);
            return NULL;
        }
        /* no free buffers, sleep a while */
        /* Drop the lock across the sleep, then retry the scan. */
        afs_brsWaiters++;
        ReleaseWriteLock(&afs_xbrs);
        afs_osi_Sleep(&afs_brsWaiters);
        ObtainWriteLock(&afs_xbrs, 301);
        afs_brsWaiters--;
    }
}
/*!
 * Remove a server from its cell's host list.
 *
 * Compacts the cell's cellHosts[] array in place, dropping the entry
 * that matches srvp, and NULL-fills the vacated tail slots.
 *
 * \param srvp Server to be removed.
 */
void
afs_RemoveCellEntry(struct server *srvp)
{
    struct cell *cellp = srvp->cell;
    afs_int32 src, dst;

    if (!cellp)
        return;

    /* Remove the server structure from the cell list - if there */
    ObtainWriteLock(&cellp->lock, 200);
    dst = 0;
    for (src = 0; src < AFS_MAXCELLHOSTS; src++) {
        if (!cellp->cellHosts[src])
            break;		/* the list is NULL-terminated */
        if (cellp->cellHosts[src] != srvp)
            cellp->cellHosts[dst++] = cellp->cellHosts[src];
    }
    /* XXX: nothing special happens when the last host is removed
     * (dst == 0) -- same open question as the original code. */
    while (dst < AFS_MAXCELLHOSTS)
        cellp->cellHosts[dst++] = 0;
    ReleaseWriteLock(&cellp->lock);
}
/*
 * Discard the tokens of the calling user (or PAG): walk the matching
 * afs_users hash chain and wipe token state on every entry whose uid
 * matches.
 *
 * \param ain, ainSize  Unused pioctl input buffer.
 * \param acred         Caller credentials; used to derive the uid/PAG.
 * \return 0 on success, EIO if the AFS daemons are not yet running.
 */
int
afspag_PUnlog(char *ain, afs_int32 ainSize, struct AFS_UCRED **acred)
{
    afs_int32 bucket;
    struct unixuser *uu;
    afs_int32 pag, uid;

    AFS_STATCNT(PUnlog);
    if (!afs_resourceinit_flag)	/* afs daemons haven't started yet */
        return EIO;		/* Inappropriate ioctl for device */

    pag = PagInCred(*acred);
    uid = (pag == NOPAG) ? (*acred)->cr_uid : pag;
    bucket = UHash(uid);

    ObtainWriteLock(&afs_xuser, 823);
    for (uu = afs_users[bucket]; uu; uu = uu->next) {
        if (uu->uid != uid)
            continue;
        uu->vid = UNDEFVID;
        uu->states &= ~UHasTokens;
        /* security is not having to say you're sorry */
        memset((char *)&uu->ct, 0, sizeof(struct ClearToken));
#ifdef UKERNEL
        /* set the expire times to 0, causes
         * afs_GCUserData to remove this entry */
        uu->ct.EndTimestamp = 0;
        uu->tokenTime = 0;
#endif /* UKERNEL */
    }
    ReleaseWriteLock(&afs_xuser);
    return 0;
}
/*
 * VnodeToSize
 *
 * Return the current size of the file behind a vnode, as reported by
 * VOP_GETATTR.  Panics if the getattr fails.
 *
 * \param vp  Vnode to query.
 * \return File size, truncated to afs_int32.
 */
afs_int32
VnodeToSize(vnode_t * vp)
{
    int code;
    struct vattr vattr;

    /*
     * We lock xosi in osi_Stat, so we probably should
     * lock it here too - RWH.
     */
    ObtainWriteLock(&afs_xosi, 578);
    vattr.va_mask = AT_SIZE;	/* only the size attribute is needed */
    /* GLOCK must be dropped around the (possibly blocking) VOP call. */
    AFS_GUNLOCK();
#ifdef AFS_SUN511_ENV
    code = VOP_GETATTR(vp, &vattr, 0, afs_osi_credp, NULL);
#else
    code = VOP_GETATTR(vp, &vattr, 0, afs_osi_credp);
#endif
    AFS_GLOCK();
    if (code) {
        osi_Panic("VnodeToSize");
    }
    ReleaseWriteLock(&afs_xosi);
    return (afs_int32) (vattr.va_size);
}
/*
 * osi_UFSTruncate (Darwin variant)
 *
 * Shrink the file backing an osi_file to asize bytes via the vnode
 * setattr interface.  Never grows a file.
 *
 * \param afile  Open cache file.
 * \param asize  Target size in bytes.
 * \return 0 on success, or the error from stat/setattr.
 *
 * Fix: removed the unused local `oldCred` (declared but never used in
 * either #ifdef branch).
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;

    AFS_STATCNT(osi_Truncate);

    /* This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
        return code;

    ObtainWriteLock(&afs_xosi, 321);
    /* GLOCK must be dropped around the (possibly blocking) vnode op. */
    AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
    VATTR_INIT(&tvattr);
    VATTR_SET(&tvattr, va_size, asize);
    code = vnode_setattr(afile->vnode, &tvattr, afs_osi_ctxtp);
#else
    VATTR_NULL(&tvattr);
    tvattr.va_size = asize;
    code = VOP_SETATTR(afile->vnode, &tvattr, &afs_osi_cred, current_proc());
#endif
    AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/*
 * osi_UFSTruncate (AFS_VOP_SETATTR variant)
 *
 * Shrink the file backing an osi_file to asize bytes.  Never grows a
 * file.
 *
 * \param afile  Open cache file.
 * \param asize  Target size in bytes.
 * \return 0 on success, or the error from stat/setattr.
 *
 * Fix: removed the unused locals `oldCred` and `ms` (declared but
 * never referenced).
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;

    AFS_STATCNT(osi_Truncate);

    /* This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
        return code;

    ObtainWriteLock(&afs_xosi, 321);
    /* GLOCK must be dropped around the (possibly blocking) vnode op. */
    AFS_GUNLOCK();
    tvattr.va_mask = AT_SIZE;	/* only the size attribute is being set */
    tvattr.va_size = asize;
    AFS_VOP_SETATTR(afile->vnode, &tvattr, 0, &afs_osi_cred, code);
    AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
int afs_MemCacheTruncate(struct osi_file *fP, int size) { struct memCacheEntry *mceP = (struct memCacheEntry *)fP; AFS_STATCNT(afs_MemCacheTruncate); ObtainWriteLock(&mceP->afs_memLock, 313); /* old directory entry; g.c. */ if (size == 0 && mceP->dataSize > memCacheBlkSize) { char *oldData = mceP->data; mceP->data = afs_osi_Alloc(memCacheBlkSize); if (mceP->data == NULL) { /* no available memory */ mceP->data = oldData; ReleaseWriteLock(&mceP->afs_memLock); afs_warn("afs: afs_MemWriteBlk mem alloc failure (%d bytes)\n", memCacheBlkSize); } else { afs_osi_Free(oldData, mceP->dataSize); mceP->dataSize = memCacheBlkSize; } } if (size < mceP->size) mceP->size = size; ReleaseWriteLock(&mceP->afs_memLock); return 0; }
/*!
 * Find the first dcache of a file that has the specified fid.
 * Similar to afs_FindDCache, only that it takes a fid instead
 * of a vcache and it can get the first dcache.
 *
 * \param afid  Fid to search for.
 *
 * \return The found dcache (with its refCount held for the caller)
 *         or NULL.
 */
struct dcache *
afs_FindDCacheByFid(struct VenusFid *afid)
{
    afs_int32 i, index;
    struct dcache *tdc = NULL;

    i = DVHash(afid);
    ObtainWriteLock(&afs_xdcache, 758);
    for (index = afs_dvhashTbl[i]; index != NULLIDX;) {
        /* Cheap pre-filter on the uniquifier before touching the slot. */
        if (afs_indexUnique[index] == afid->Fid.Unique) {
            tdc = afs_GetValidDSlot(index);
            if (tdc) {
                /* afs_GetValidDSlot returns with tlock read-held;
                 * we only needed it for the slot fetch. */
                ReleaseReadLock(&tdc->tlock);
                if (!FidCmp(&tdc->f.fid, afid)) {
                    break;	/* leaving refCount high for caller */
                }
                afs_PutDCache(tdc);
            }
        }
        index = afs_dvnextTbl[index];
    }
    ReleaseWriteLock(&afs_xdcache);

    /* If the chain ran out, the last tdc examined (if any) was already
     * released above, so report "not found". */
    if (index == NULLIDX)
        tdc = NULL;
    return tdc;
}
/*!
 * Generate a fake fid (vnode and uniquifier) for a vcache
 * (either dir or normal file).  The vnode comes from afs_DisconVnode
 * (odd for directories, even for files/symlinks) and the uniquifier
 * is one more than the highest uniquifier found on the fid's vcache
 * hash chain.
 *
 * \param afid   The fid structure that will be filled.
 * \param avtype Vnode type: VDIR/VREG/VLNK.
 * \param lock   True indicates that xvcache may be obtained,
 *               False that it is already held.
 *
 * \note The cell number must be completed somewhere else.
 */
void
afs_GenFakeFid(struct VenusFid *afid, afs_uint32 avtype, int lock)
{
    struct vcache *vcp;
    afs_uint32 chain, highest = 0;

    if (avtype == VDIR)
        afid->Fid.Vnode = afs_DisconVnode + 1;
    else if (avtype == VREG || avtype == VLNK)
        afid->Fid.Vnode = afs_DisconVnode;

    if (lock)
        ObtainWriteLock(&afs_xvcache, 736);

    /* Scan the hash chain for the largest uniquifier in use. */
    chain = VCHash(afid);
    for (vcp = afs_vhashT[chain]; vcp; vcp = vcp->hnext) {
        if (vcp->f.fid.Fid.Unique > highest)
            highest = vcp->f.fid.Fid.Unique;
    }

    if (lock)
        ReleaseWriteLock(&afs_xvcache);

    afid->Fid.Unique = highest + 1;

    /* Advance by two so the odd/even (dir/file) split is preserved;
     * skip zero on wraparound. */
    afs_DisconVnode += 2;
    if (!afs_DisconVnode)
        afs_DisconVnode = 2;
}
/*!
 * Generate a fake fid for a disconnected shadow dir.
 * Similar to afs_GenFakeFid, only that it uses the dhash
 * to search for a uniquifier because a shadow dir lives only
 * in the dcache.
 *
 * \param afid  Fid to fill in (Vnode and Unique fields).
 *
 * \note Don't forget to fill in afid with Cell and Volume.
 */
void
afs_GenShadowFid(struct VenusFid *afid)
{
    afs_uint32 i, index, max_unique = 1;
    struct vcache *tvc = NULL;

    /* Try generating a fid that isn't used in the vhash. */
    do {
        /* Shadow Fids are always directories */
        afid->Fid.Vnode = afs_DisconVnode + 1;

        i = DVHash(afid);
        ObtainWriteLock(&afs_xdcache, 737);
        /* Walk the dvhash chain for the largest uniquifier in use.
         * Note: `i` is reused here as the saved next-index (its hash
         * value is no longer needed after seeding the loop). */
        for (index = afs_dvhashTbl[i]; index != NULLIDX; index = i) {
            i = afs_dvnextTbl[index];
            if (afs_indexUnique[index] > max_unique)
                max_unique = afs_indexUnique[index];
        }
        ReleaseWriteLock(&afs_xdcache);

        afid->Fid.Unique = max_unique + 1;
        /* Advance by two to preserve the odd (directory) parity;
         * skip zero on wraparound. */
        afs_DisconVnode += 2;
        if (!afs_DisconVnode)
            afs_DisconVnode = 2;

        /* Is this a used vnode? */
        ObtainSharedLock(&afs_xvcache, 762);
        tvc = afs_FindVCache(afid, 0, 1);
        ReleaseSharedLock(&afs_xvcache);
        if (tvc)
            afs_PutVCache(tvc);
    } while (tvc);
}
/*
 * DFlush
 *
 * Write out every dirty directory buffer.  The global afs_bufferLock
 * is held for reading while scanning; for each dirty buffer it is
 * dropped (after pinning the buffer via lockers++ under the buffer's
 * own write lock) so the actual write can proceed, then re-taken.
 *
 * \return 0 if every write succeeded, otherwise the first error code
 *         encountered (later buffers are still attempted).
 */
int
DFlush(void)
{
    /* Flush all the modified buffers. */
    int i;
    struct buffer **tbp;
    afs_int32 code, rcode;

    rcode = 0;
    tbp = Buffers;
    ObtainReadLock(&afs_bufferLock);
    for (i = 0; i < nbuffers; i++, tbp++) {
        if ((*tbp)->dirty) {
            ObtainWriteLock(&(*tbp)->lock);
            (*tbp)->lockers++;
            ReleaseReadLock(&afs_bufferLock);
            /* Re-check dirty: it may have been flushed while we were
             * acquiring the buffer lock. */
            if ((*tbp)->dirty) {
                code = ReallyWrite((*tbp)->fid, (*tbp)->page, (*tbp)->data);
                if (!code)
                    (*tbp)->dirty = 0;	/* Clear the dirty flag */
                if (code && !rcode) {
                    rcode = code;	/* remember only the first failure */
                }
            }
            (*tbp)->lockers--;
            ReleaseWriteLock(&(*tbp)->lock);
            ObtainReadLock(&afs_bufferLock);
        }
    }
    ReleaseReadLock(&afs_bufferLock);
    return rcode;
}
/*
 * DFlushEntry
 *
 * Write out all dirty directory buffers belonging to a single entry
 * (identified by fid), stopping at the first write error.
 *
 * \param fid  Fid whose buffers should be flushed.
 * \return 0 on success, or the first error from ReallyWrite.
 */
int
DFlushEntry(afs_int32 *fid)
{
    /* Flush pages modified by one entry. */
    struct buffer *tb;
    int code;

    ObtainReadLock(&afs_bufferLock);
    for (tb = phTable[pHash(fid)]; tb; tb = tb->hashNext)
        if (FidEq(tb->fid, fid) && tb->dirty) {
            ObtainWriteLock(&tb->lock);
            /* Re-check dirty: another thread may have flushed the
             * buffer while we acquired its lock. */
            if (tb->dirty) {
                code = ReallyWrite(tb->fid, tb->page, tb->data);
                if (code) {
                    /* Unwind both locks before the early return. */
                    ReleaseWriteLock(&tb->lock);
                    ReleaseReadLock(&afs_bufferLock);
                    return code;
                }
                tb->dirty = 0;
            }
            ReleaseWriteLock(&tb->lock);
        }
    ReleaseReadLock(&afs_bufferLock);
    return 0;
}
/*
 * osi_UFSTruncate (AIX/JFS variant)
 *
 * Shrink the file backing an osi_file to asize bytes via VNOP_FTRUNC.
 * Never grows a file.
 *
 * \param afile  Open cache file.
 * \param asize  Target size in bytes.
 * \return 0 on success, or the error from stat/ftrunc.
 *
 * Fix: removed the unused locals `oldCred` and `tvattr` (this variant
 * uses VNOP_FTRUNC, not setattr, so neither is ever referenced).
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    afs_int32 code;
    struct osi_stat tstat;
    afs_int32 mode = FWRITE | FSYNC;

    AFS_STATCNT(osi_Truncate);

    /* This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
        return code;

    ObtainWriteLock(&afs_xosi, 321);
    /*
     * If we're truncating an unopened file to a non-zero length,
     * we need to bind it to a vm segment
     * Note that that the binding will actually happen inside
     * jfs by xix_ftrunc; setting mode to 0 will enable that.
     */
    if (asize && !VTOGP(afile->vnode)->gn_seg)
        mode = 0;
    /* GLOCK must be dropped around the (possibly blocking) vnode op. */
    AFS_GUNLOCK();
    code = VNOP_FTRUNC(afile->vnode, mode, asize, (caddr_t) 0, &afs_osi_cred);
    AFS_GLOCK();
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/* allocate space for sender */
/*
 * osi_AllocSmallSpace
 *
 * Return a small (<= AFS_SMALLOCSIZ) buffer, reusing one from the
 * free list when available, otherwise allocating a fresh block.
 * Panics if the requested size exceeds AFS_SMALLOCSIZ.
 *
 * NOTE(review): if the free list is emptied between the unlocked
 * !freeSmallList check and taking osi_fsplock, tp is NULL and this
 * returns NULL (with SmallBlocksActive already bumped) -- presumably
 * callers tolerate or never hit this; confirm.
 */
void *
osi_AllocSmallSpace(size_t size)
{
    struct osi_packet *tp;
    AFS_STATCNT(osi_AllocSmallSpace);
    if (size > AFS_SMALLOCSIZ)
        osi_Panic("osi_AllocSmallS: size=%d\n", (int)size);

    if (!freeSmallList) {
        /* Free list looks empty: allocate a fresh block. */
        afs_stats_cmperf.SmallBlocksAlloced++;
        afs_stats_cmperf.SmallBlocksActive++;
        tp = afs_osi_Alloc(AFS_SMALLOCSIZ);
#ifdef KERNEL_HAVE_PIN
        pin((char *)tp, AFS_SMALLOCSIZ);
#endif
        return (char *)tp;
    }
    afs_stats_cmperf.SmallBlocksActive++;
    /* Pop the head of the free list under the free-list lock. */
    ObtainWriteLock(&osi_fsplock, 327);
    tp = freeSmallList;
    if (tp)
        freeSmallList = tp->next;
    ReleaseWriteLock(&osi_fsplock);
    return (char *)tp;
}
/*
 * osi_UFSTruncate (credential-swapping variant)
 *
 * Shrink the file backing an osi_file to asize bytes.  Never grows a
 * file.  Temporarily installs afs_osi_cred as the process credential
 * around the VOP_SETATTR call (see inline comment).
 *
 * \param afile  Open cache file.
 * \param asize  Target size in bytes.
 * \return 0 on success, or the error from stat/setattr.
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    afs_ucred_t *oldCred;
    struct vattr tvattr;
    afs_int32 code;
    struct osi_stat tstat;
    AFS_STATCNT(osi_Truncate);

    /* This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    code = afs_osi_Stat(afile, &tstat);
    if (code || tstat.size <= asize)
        return code;

    ObtainWriteLock(&afs_xosi, 321);
    VATTR_NULL(&tvattr);
    /* note that this credential swapping stuff is only necessary because
     * of ufs's references directly to u.u_cred instead of to
     * credentials parameter.  Probably should fix ufs some day. */
    oldCred = p_cred(u.u_procp);
    set_p_cred(u.u_procp, &afs_osi_cred);
    tvattr.va_size = asize;
    /* GLOCK must be dropped around the (possibly blocking) vnode op. */
    AFS_GUNLOCK();
    code = VOP_SETATTR(afile->vnode, &tvattr, &afs_osi_cred, 0);
    AFS_GLOCK();
    set_p_cred(u.u_procp, oldCred);	/* restore */
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/* allocate space for sender */
/*
 * osi_AllocLargeSpace
 *
 * Return a large (<= AFS_LRALLOCSIZ) buffer, reusing one from the
 * free packet list when available, otherwise allocating a fresh
 * block.  Panics if the requested size exceeds AFS_LRALLOCSIZ.
 * Must be called with the AFS global lock held.
 *
 * NOTE(review): if the free list is emptied between the unlocked
 * !freePacketList check and taking osi_flplock, this returns NULL
 * (with LargeBlocksActive already bumped) -- presumably callers
 * tolerate or never hit this; confirm.
 */
void *
osi_AllocLargeSpace(size_t size)
{
    struct osi_packet *tp;
    AFS_ASSERT_GLOCK();
    AFS_STATCNT(osi_AllocLargeSpace);
    if (size > AFS_LRALLOCSIZ)
        osi_Panic("osi_AllocLargeSpace: size=%d\n", (int)size);
    afs_stats_cmperf.LargeBlocksActive++;
    if (!freePacketList) {
        char *p;

        /* Free list looks empty: allocate a fresh block. */
        afs_stats_cmperf.LargeBlocksAlloced++;
        p = afs_osi_Alloc(AFS_LRALLOCSIZ);
#ifdef KERNEL_HAVE_PIN
        /*
         * Need to pin this memory since under heavy conditions this memory
         * could be swapped out; the problem is that we could inside rx where
         * interrupts are disabled and thus we would panic if we don't pin it.
         */
        pin(p, AFS_LRALLOCSIZ);
#endif
        return p;
    }
    /* Pop the head of the free list under the free-list lock. */
    ObtainWriteLock(&osi_flplock, 324);
    tp = freePacketList;
    if (tp)
        freePacketList = tp->next;
    ReleaseWriteLock(&osi_flplock);
    return (char *)tp;
}
/*
 * afs_osi_Stat (FreeBSD variant)
 *
 * Fetch size/mtime/atime of the file behind an osi_file via
 * VOP_GETATTR, with the vnode exclusively locked for the call.
 *
 * \param afile  Open cache file.
 * \param astat  Out: size, mtime, atime (seconds).
 * \return 0 on success, or the error from VOP_GETATTR.
 */
int
afs_osi_Stat(struct osi_file *afile, struct osi_stat *astat)
{
    afs_int32 code;
    struct vattr tvattr;

    AFS_STATCNT(osi_Stat);
    ObtainWriteLock(&afs_xosi, 320);
    /* GLOCK must be dropped around the (possibly blocking) vnode ops. */
    AFS_GUNLOCK();
#if defined(AFS_FBSD80_ENV)
    vn_lock(afile->vnode, LK_EXCLUSIVE | LK_RETRY);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp);
    VOP_UNLOCK(afile->vnode, 0);
#else
    vn_lock(afile->vnode, LK_EXCLUSIVE | LK_RETRY, curthread);
    code = VOP_GETATTR(afile->vnode, &tvattr, afs_osi_credp, curthread);
    VOP_UNLOCK(afile->vnode, LK_EXCLUSIVE, curthread);
#endif
    AFS_GLOCK();
    if (code == 0) {
        astat->size = tvattr.va_size;
        astat->mtime = tvattr.va_mtime.tv_sec;
        astat->atime = tvattr.va_atime.tv_sec;
    }
    ReleaseWriteLock(&afs_xosi);
    return code;
}
/* * Invalidate the /afs vnode for dynroot; called when the underlying * directory has changed and needs to be re-read. */ void afs_DynrootInvalidate(void) { afs_int32 retry; struct vcache *tvc; struct VenusFid tfid; if (!afs_dynrootEnable) return; ObtainWriteLock(&afs_dynrootDirLock, 687); afs_dynrootVersion++; afs_dynrootVersionHigh = osi_Time(); ReleaseWriteLock(&afs_dynrootDirLock); afs_GetDynrootFid(&tfid); do { retry = 0; ObtainReadLock(&afs_xvcache); tvc = afs_FindVCache(&tfid, &retry, 0); ReleaseReadLock(&afs_xvcache); } while (retry); if (tvc) { tvc->f.states &= ~(CStatd | CUnique); osi_dnlc_purgedp(tvc); afs_PutVCache(tvc); } }
/*
 * Shrink the file backing an osi_file to asize bytes via VOP_SETATTR,
 * holding the vnode lock across the call.  Never grows a file; on
 * success the cached afile->size is updated as well.
 */
int
osi_UFSTruncate(struct osi_file *afile, afs_int32 asize)
{
    struct vattr va;
    struct osi_stat st;
    afs_int32 rv;

    AFS_STATCNT(osi_Truncate);

    /*
     * This routine only shrinks files, and most systems
     * have very slow truncates, even when the file is already
     * small enough.  Check now and save some time.
     */
    rv = afs_osi_Stat(afile, &st);
    if (rv != 0 || st.size <= asize)
        return rv;

    ObtainWriteLock(&afs_xosi, 321);
    VATTR_NULL(&va);
    va.va_size = asize;

    /* Drop the global lock around the (possibly blocking) vnode op. */
    AFS_GUNLOCK();
    VOP_LOCK(afile->vnode, LK_EXCLUSIVE | LK_RETRY, curproc);
    rv = VOP_SETATTR(afile->vnode, &va, afs_osi_credp, curproc);
    VOP_UNLOCK(afile->vnode, 0, curproc);
    AFS_GLOCK();

    if (rv == 0)
        afile->size = asize;	/* keep the cached size in sync */
    ReleaseWriteLock(&afs_xosi);
    return rv;
}
/* Try to invalidate pages, for "fs flush" or "fs flushv"; or
 * try to free pages, when deleting a file.
 *
 * Locking: the vcache entry's lock is held.  It may be dropped and
 * re-obtained.
 *
 * Since we drop and re-obtain the lock, we can't guarantee that there won't
 * be some pages around when we return, newly created by concurrent activity.
 */
void
osi_VM_TryToSmush(struct vcache *avc, afs_ucred_t *acred, int sync)
{
    /* The flush can block, so the vcache lock must be dropped around it
     * and re-taken afterwards (lock id 59). */
    ReleaseWriteLock(&avc->lock);
    osi_VM_FlushVCache(avc, NULL);
    ObtainWriteLock(&avc->lock, 59);
}
/*
 * afs_DisconCreateSymlink
 *
 * While disconnected, materialize a symlink's target string into a
 * fresh dcache chunk for the given vcache.
 *
 * \param avc    vcache of the symlink being created.
 * \param aname  Symlink target string (written as the chunk's data).
 * \param areq   Request record used for the dcache fetch.
 * \return 0 on success, ENETDOWN if no dcache could be obtained.
 *
 * NOTE(review): the return of afs_CFileOpen is used unchecked --
 * presumably it cannot fail here (or panics internally); confirm.
 * NOTE(review): tdc is never released via afs_PutDCache on the
 * success path -- verify whether the reference from afs_GetDCache
 * is intentionally retained by the caller's protocol.
 */
static int
afs_DisconCreateSymlink(struct vcache *avc, char *aname,
                        struct vrequest *areq)
{
    struct dcache *tdc;
    struct osi_file *tfile;
    afs_size_t offset, len;

    tdc = afs_GetDCache(avc, 0, areq, &offset, &len, 0);
    if (!tdc) {
        /* printf("afs_DisconCreateSymlink: can't get new dcache for symlink.\n"); */
        return ENETDOWN;
    }

    len = strlen(aname);
    avc->f.m.Length = len;	/* symlink length == target string length */

    ObtainWriteLock(&tdc->lock, 720);
    afs_AdjustSize(tdc, len);
    tdc->validPos = len;
    tfile = afs_CFileOpen(&tdc->f.inode);
    afs_CFileWrite(tfile, 0, aname, len);
    afs_CFileClose(tfile);
    ReleaseWriteLock(&tdc->lock);
    return 0;
}
/*
 * afs_osi_Stat (Darwin variant)
 *
 * Fetch size/mtime/atime of the file behind an osi_file via the
 * platform getattr interface.
 *
 * \param afile  Open cache file.
 * \param astat  Out: size, mtime, atime (seconds).
 * \return 0 on success; EINVAL if (Darwin 8.0+) the kernel did not
 *         supply all requested attributes; otherwise the getattr
 *         error.
 */
int
afs_osi_Stat(struct osi_file *afile, struct osi_stat *astat)
{
    afs_int32 code;
    struct vattr tvattr;

    AFS_STATCNT(osi_Stat);
    ObtainWriteLock(&afs_xosi, 320);
    /* GLOCK must be dropped around the (possibly blocking) vnode op. */
    AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
    VATTR_INIT(&tvattr);
    VATTR_WANTED(&tvattr, va_size);
    VATTR_WANTED(&tvattr, va_blocksize);
    VATTR_WANTED(&tvattr, va_mtime);
    VATTR_WANTED(&tvattr, va_atime);
    code = vnode_getattr(afile->vnode, &tvattr, afs_osi_ctxtp);
    /* All requested attributes must be present for the result to be
     * trustworthy. */
    if (code == 0 && !VATTR_ALL_SUPPORTED(&tvattr))
        code = EINVAL;
#else
    code = VOP_GETATTR(afile->vnode, &tvattr, &afs_osi_cred, current_proc());
#endif
    AFS_GLOCK();
    if (code == 0) {
        astat->size = tvattr.va_size;
        astat->mtime = tvattr.va_mtime.tv_sec;
        astat->atime = tvattr.va_atime.tv_sec;
    }
    ReleaseWriteLock(&afs_xosi);
    return code;
}
struct afspag_cell *afspag_GetCell(char *acell) { struct afspag_cell *tcell; ObtainWriteLock(&afs_xpagcell, 820); for (tcell = cells; tcell; tcell = tcell->next) { if (!strcmp(acell, tcell->cellname)) break; } if (!tcell) { tcell = (struct afspag_cell *)afs_osi_Alloc(sizeof(struct afspag_cell)); if (!tcell) goto out; tcell->cellname = (char *)afs_osi_Alloc(strlen(acell) + 1); if (!tcell->cellname) { afs_osi_Free(tcell, sizeof(struct afspag_cell)); tcell = 0; goto out; } strcpy(tcell->cellname, acell); tcell->cellnum = ++lastcell; tcell->next = cells; cells = tcell; if (!primary_cell) primary_cell = tcell; } out: ReleaseWriteLock(&afs_xpagcell); return tcell; }
/**
 * Look up a volume by name and cell.
 *
 * Scans the in-memory volume table first; a hit takes a reference and
 * returns immediately.  On a miss (and when not disconnected) the
 * volume is fetched/created via afs_NewVolumeByName.
 *
 * @param aname    Volume name.
 * @param acell    Cell number.
 * @param agood    Passed through to afs_NewVolumeByName.
 * @param areq     Request record.
 * @param locktype Type of lock to be used.
 * @return Referenced volume, or NULL (disconnected miss).
 */
struct volume *
afs_GetVolumeByName(char *aname, afs_int32 acell, int agood,
                    struct vrequest *areq, afs_int32 locktype)
{
    afs_int32 bucket;
    struct volume *vp;

    AFS_STATCNT(afs_GetVolumeByName);
    ObtainWriteLock(&afs_xvolume, 112);
    for (bucket = 0; bucket < NVOLS; bucket++) {
        for (vp = afs_volumes[bucket]; vp; vp = vp->next) {
            /* Match on name + cell, skipping entries flagged for
             * recheck. */
            if (vp->name && vp->cell == acell
                && strcmp(aname, vp->name) == 0
                && (vp->states & VRecheck) == 0) {
                vp->refCount++;	/* reference handed to the caller */
                ReleaseWriteLock(&afs_xvolume);
                return vp;
            }
        }
    }
    ReleaseWriteLock(&afs_xvolume);

    if (AFS_IS_DISCONNECTED)
        return NULL;

    return afs_NewVolumeByName(aname, acell, agood, areq, locktype);
}
/* afs_FlushServerCBs * to be used only in dire circumstances, this drops all callbacks on * the floor for a specific server, without giving them back to the server. * It's ok, the server can deal with it, but it is a little bit rude. */ void afs_FlushServerCBs(struct server *srvp) { register int i; register struct vcache *tvc; ObtainWriteLock(&afs_xcbhash, 86); /* pretty likely I'm going to remove something */ for (i = 0; i < VCSIZE; i++) { /* reset all the vnodes */ for (tvc = afs_vhashT[i]; tvc; tvc = tvc->hnext) { if (tvc->callback == srvp) { tvc->callback = 0; tvc->dchint = NULL; /* invalidate hints */ tvc->f.states &= ~(CStatd); if (!(tvc->f.states & (CVInit|CVFlushed)) && ((tvc->f.fid.Fid.Vnode & 1) || (vType(tvc) == VDIR))) { osi_dnlc_purgedp(tvc); } afs_DequeueCallback(tvc); } } } ReleaseWriteLock(&afs_xcbhash); }