/* Generic read interface */
int
afs_osi_Read(struct osi_file *afile, int offset, void *aptr,
             afs_int32 asize)
{
    afs_ucred_t *oldCred;
    unsigned int resid;
    afs_int32 code;
    afs_int32 cnt1 = 0;
    AFS_STATCNT(osi_Read);

    /**
     * If the osi_file passed in is NULL, panic only if AFS is not shutting
     * down. No point in crashing when we are already shutting down.
     */
    if (!afile) {
        if (!afs_shuttingdown)
            osi_Panic("osi_Read called with null param");
        else
            return -EIO;
    }

    if (offset != -1)
        afile->offset = offset;
  retry_IO:
    /* Note the difference in the way afile->offset is passed (see the
     * comments in gop_rdwr() in afs_aix_subr.c). */
    AFS_GUNLOCK();
#ifdef AFS_64BIT_KERNEL
    code =
        gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize,
                 &afile->offset, AFS_UIOSYS, NULL, &resid);
#else
    code =
        gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize,
                 (off_t) &afile->offset, AFS_UIOSYS, NULL, &resid);
#endif
    AFS_GLOCK();
    if (code == 0) {
        code = asize - resid;
        afile->offset += code;
        osi_DisableAtimes(afile->vnode);
    } else {
        afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32, resid,
                   ICL_TYPE_INT32, code);
        /*
         * Retry to work around periodic low-level EFAULT failures seen with
         * the Weitek chip; in all observed failures a second read succeeded.
         */
        if ((code == EFAULT) && (cnt1++ < 5)) {
            afs_stats_cmperf.osiread_efaults++;
            goto retry_IO;
        }
        setuerror(code);
        if (code > 0) {
            code = -code;
        }
    }
    return code;
}
/*!
 * Look up the AFSDB record for the given cell name and create the cell
 * locally.
 * \param acellName Cell name.
 */
void
afs_LookupAFSDB(char *acellName)
{
    int code;
    char *cellName = afs_strdup(acellName);

    code = afs_GetCellHostsAFSDB(cellName);
    afs_Trace2(afs_iclSetp, CM_TRACE_AFSDB, ICL_TYPE_STRING, cellName,
               ICL_TYPE_INT32, code);
    afs_osi_FreeStr(cellName);
}
/* afs_root - stat the root of the file system. AFS global held on entry. */
static int
afs_root(struct super_block *afsp)
{
    afs_int32 code = 0;
    struct vcache *tvp = 0;

    AFS_STATCNT(afs_root);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
        tvp = afs_globalVp;
    } else {
        struct vrequest *treq = NULL;
        cred_t *credp = crref();

        if (afs_globalVp) {
            afs_PutVCache(afs_globalVp);
            afs_globalVp = NULL;
        }

        if (!(code = afs_CreateReq(&treq, credp))
            && !(code = afs_CheckInit())) {
            tvp = afs_GetVCache(&afs_rootFid, treq, NULL, NULL);
            if (tvp) {
                struct inode *ip = AFSTOV(tvp);
                struct vattr *vattr = NULL;

                code = afs_CreateAttr(&vattr);
                if (!code) {
                    afs_getattr(tvp, vattr, credp);
                    afs_fill_inode(ip, vattr);

                    /* setup super_block and mount point inode. */
                    afs_globalVp = tvp;
#if defined(HAVE_LINUX_D_MAKE_ROOT)
                    afsp->s_root = d_make_root(ip);
#else
                    afsp->s_root = d_alloc_root(ip);
#endif
#if !defined(STRUCT_SUPER_BLOCK_HAS_S_D_OP)
                    afsp->s_root->d_op = &afs_dentry_operations;
#endif
                    afs_DestroyAttr(vattr);
                }
            } else
                code = EIO;
        }
        crfree(credp);
        afs_DestroyReq(treq);
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, afs_globalVp,
               ICL_TYPE_INT32, code);
    return code;
}
/* Generic read interface */
int
afs_osi_Read(struct osi_file *afile, int offset, void *aptr,
             afs_int32 asize)
{
    afs_ucred_t *oldCred;
    afs_size_t resid;
    afs_int32 code;
#ifdef AFS_DARWIN80_ENV
    uio_t uio;
#endif
    AFS_STATCNT(osi_Read);

    /**
     * If the osi_file passed in is NULL, panic only if AFS is not shutting
     * down. No point in crashing when we are already shutting down.
     */
    if (!afile) {
        if (afs_shuttingdown == AFS_RUNNING)
            osi_Panic("osi_Read called with null param");
        else
            return -EIO;
    }

    if (offset != -1)
        afile->offset = offset;
    AFS_GUNLOCK();
#ifdef AFS_DARWIN80_ENV
    uio = uio_create(1, afile->offset, AFS_UIOSYS, UIO_READ);
    uio_addiov(uio, CAST_USER_ADDR_T(aptr), asize);
    code = VNOP_READ(afile->vnode, uio, IO_UNIT, afs_osi_ctxtp);
    resid = AFS_UIO_RESID(uio);
    uio_free(uio);
#else
    code =
        gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize,
                 afile->offset, AFS_UIOSYS, IO_UNIT, &afs_osi_cred, &resid);
#endif
    AFS_GLOCK();
    if (code == 0) {
        code = asize - resid;
        afile->offset += code;
        osi_DisableAtimes(afile->vnode);
    } else {
        afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32, resid,
                   ICL_TYPE_INT32, code);
        if (code > 0) {
            code *= -1;
        }
    }
    return code;
}
/* Generic read interface */
int
afs_osi_Read(struct osi_file *afile, int offset, void *aptr,
             afs_int32 asize)
{
    afs_ucred_t *oldCred;
    long resid;
    afs_int32 code;
    afs_int32 cnt1 = 0;
    AFS_STATCNT(osi_Read);

    /**
     * If the osi_file passed in is NULL, panic only if AFS is not shutting
     * down. No point in crashing when we are already shutting down.
     */
    if (!afile) {
        if (afs_shuttingdown == AFS_RUNNING)
            osi_Panic("osi_Read called with null param");
        else
            return -EIO;
    }

    if (offset != -1)
        afile->offset = offset;
  retry_IO:
    AFS_GUNLOCK();
    code =
        gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize,
                 afile->offset, AFS_UIOSYS, IO_UNIT, &resid);
    AFS_GLOCK();
    if (code == 0) {
        code = asize - resid;
        afile->offset += code;
        osi_DisableAtimes(afile->vnode);
    } else {
        afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32,
                   (afs_int32) resid, ICL_TYPE_INT32, code);
        /*
         * Retry to work around periodic low-level EFAULT failures seen with
         * the Weitek chip; in all observed failures a second read succeeded.
         */
        if ((code == EFAULT) && (cnt1++ < 5)) {
            afs_stats_cmperf.osiread_efaults++;
            goto retry_IO;
        }
        setuerror(code);
        if (code > 0) {
            code *= -1;
        }
    }
    return code;
}
/* afs_root - stat the root of the file system. AFS global held on entry. */
static int
afs_root(struct super_block *afsp)
{
    afs_int32 code = 0;
    struct vrequest treq;
    struct vcache *tvp = 0;

    AFS_STATCNT(afs_root);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
        tvp = afs_globalVp;
    } else {
        cred_t *credp = crref();

        if (afs_globalVp) {
            afs_PutVCache(afs_globalVp);
            afs_globalVp = NULL;
        }

        if (!(code = afs_InitReq(&treq, credp))
            && !(code = afs_CheckInit())) {
            tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
            if (tvp) {
                struct inode *ip = AFSTOV(tvp);
                struct vattr vattr;

                afs_getattr(tvp, &vattr, credp);
                afs_fill_inode(ip, &vattr);

                /* setup super_block and mount point inode. */
                afs_globalVp = tvp;
#if defined(AFS_LINUX24_ENV)
                afsp->s_root = d_alloc_root(ip);
#else
                afsp->s_root = d_alloc_root(ip, NULL);
#endif
                afsp->s_root->d_op = &afs_dentry_operations;
            } else
                code = ENOENT;
        }
        crfree(credp);
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER, afs_globalVp,
               ICL_TYPE_INT32, code);
    return code;
}
/* Generic read interface */
int
afs_osi_Read(struct osi_file *afile, int offset, void *aptr,
             afs_int32 asize)
{
    struct uio auio;
    struct iovec iov;
    afs_int32 code;

    memset(&auio, 0, sizeof(auio));
    memset(&iov, 0, sizeof(iov));

    AFS_STATCNT(osi_Read);

    /*
     * If the osi_file passed in is NULL, panic only if AFS is not shutting
     * down. No point in crashing when we are already shutting down.
     */
    if (!afile) {
        if (afs_shuttingdown == AFS_RUNNING)
            osi_Panic("osi_Read called with null param");
        else
            return -EIO;
    }

    if (offset != -1)
        afile->offset = offset;
    setup_uio(&auio, &iov, aptr, afile->offset, asize, UIO_READ, AFS_UIOSYS);
    AFS_GUNLOCK();
    code = osi_rdwr(afile, &auio, UIO_READ);
    AFS_GLOCK();
    if (code == 0) {
        code = asize - auio.uio_resid;
        afile->offset += code;
    } else {
        afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32,
                   auio.uio_resid, ICL_TYPE_INT32, code);
        if (code > 0) {
            code = -code;
        }
    }
    return code;
}
/* Generic read interface */
int
afs_osi_Read(struct osi_file *afile, int offset, void *aptr,
             afs_int32 asize)
{
    afs_ucred_t *oldCred;
    ssize_t resid;
    afs_int32 code;
    afs_int32 cnt1 = 0;
    AFS_STATCNT(osi_Read);

    /**
     * If the osi_file passed in is NULL, panic only if AFS is not shutting
     * down. No point in crashing when we are already shutting down.
     */
    if (!afile) {
        if (!afs_shuttingdown)
            osi_Panic("osi_Read called with null param");
        else
            return -EIO;
    }

    if (offset != -1)
        afile->offset = offset;
    AFS_GUNLOCK();
    code =
        gop_rdwr(UIO_READ, afile->vnode, (caddr_t) aptr, asize,
                 afile->offset, AFS_UIOSYS, 0, 0, afs_osi_credp, &resid);
    AFS_GLOCK();
    if (code == 0) {
        code = asize - resid;
        afile->offset += code;
        osi_DisableAtimes(afile->vnode);
    } else {
        afs_Trace2(afs_iclSetp, CM_TRACE_READFAILED, ICL_TYPE_INT32, resid,
                   ICL_TYPE_INT32, code);
        if (code > 0) {
            code = -code;
        }
    }
    return code;
}
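The five platform variants above all implement the same contract: on success afs_osi_Read returns the number of bytes actually read (asize minus the residual count) and advances afile->offset; on failure it returns a negative error code; and an offset of -1 means "read from the current position". A stand-alone, user-space sketch of just that calling convention follows; the names mini_file and mini_read are hypothetical, standing in for the kernel types.

#include <errno.h>
#include <stdio.h>
#include <string.h>

struct mini_file {
    const char *data;
    int len;
    int offset;
};

/* Sketch of the afs_osi_Read return convention: >= 0 is a byte count
 * (and the cursor advances), < 0 is a negated error code, and an
 * offset of -1 continues from the current position. */
static int
mini_read(struct mini_file *f, int offset, void *aptr, int asize)
{
    int n;

    if (!f)
        return -EIO;
    if (offset != -1)
        f->offset = offset;
    n = f->len - f->offset;
    if (n < 0)
        return -EINVAL;
    if (n > asize)
        n = asize;
    memcpy(aptr, f->data + f->offset, n);
    f->offset += n;             /* success advances the cursor */
    return n;                   /* bytes read, like asize - resid */
}

int
main(void)
{
    struct mini_file f = { "cache-chunk", 11, 0 };
    char buf[8];
    int code = mini_read(&f, 0, buf, sizeof(buf));

    printf("first read: %d bytes\n", code);      /* 8 */
    code = mini_read(&f, -1, buf, sizeof(buf));  /* continue at offset 8 */
    printf("second read: %d bytes\n", code);     /* 3 */
    return 0;
}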
int
afs_CheckCode(afs_int32 acode, struct vrequest *areq, int where)
{
    AFS_STATCNT(afs_CheckCode);
    if (acode) {
        afs_Trace2(afs_iclSetp, CM_TRACE_CHECKCODE, ICL_TYPE_INT32, acode,
                   ICL_TYPE_INT32, where);
    }
    if ((acode & ~0xff) == ERROR_TABLE_BASE_uae)
        acode = et_to_sys_error(acode);
    if (!areq || !areq->initd)
        return acode;
    if (areq->networkError)
        return ETIMEDOUT;
    if (acode == 0)
        return 0;
    if (areq->accessError)
        return EACCES;
    if (areq->volumeError == VOLMISSING)
        return ENODEV;
    if (areq->volumeError == VOLBUSY)
        return EWOULDBLOCK;
    if (acode == VNOVNODE)
        return ENOENT;
    if (acode == VDISKFULL)
        return ENOSPC;
    if (acode == VOVERQUOTA)
        return
#ifdef EDQUOT
            EDQUOT
#else
            ENOSPC
#endif
            ;
    return acode;
}                               /*afs_CheckCode */
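afs_CheckCode folds per-request state and fileserver error codes into a single errno for the caller. Note the precedence: a recorded network error is checked before acode itself, so even a "successful" acode of zero comes back as ETIMEDOUT. A minimal user-space sketch of that precedence follows; req_t and check_code are hypothetical names, and the X_VNOVNODE/X_VDISKFULL values are placeholders, not the real protocol constants.

#include <errno.h>
#include <stdio.h>

#define X_VNOVNODE  101         /* placeholder for the fileserver's VNOVNODE */
#define X_VDISKFULL 102         /* placeholder for VDISKFULL */

typedef struct {
    int initd, networkError, accessError;
} req_t;

static int
check_code(int acode, req_t *areq)
{
    if (!areq || !areq->initd)
        return acode;           /* no request state: pass through */
    if (areq->networkError)
        return ETIMEDOUT;       /* request-level state wins over acode */
    if (acode == 0)
        return 0;
    if (areq->accessError)
        return EACCES;
    if (acode == X_VNOVNODE)
        return ENOENT;
    if (acode == X_VDISKFULL)
        return ENOSPC;
    return acode;
}

int
main(void)
{
    req_t r = { 1, 1, 0 };
    /* Even a zero code is overridden by a recorded network error. */
    printf("%d (ETIMEDOUT=%d)\n", check_code(0, &r), ETIMEDOUT);
    return 0;
}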
afs_root(struct mount *mp, struct vnode **vpp)
#endif
{
    int error;
    struct vrequest treq;
    register struct vcache *tvp = 0;
#ifdef AFS_FBSD50_ENV
#ifndef AFS_FBSD53_ENV
    struct thread *td = curthread;
#endif
    struct ucred *cr = td->td_ucred;
#else
    struct proc *p = curproc;
    struct ucred *cr = p->p_cred->pc_ucred;
#endif

    AFS_GLOCK();
    AFS_STATCNT(afs_root);
    crhold(cr);
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
        tvp = afs_globalVp;
        error = 0;
    } else {
      tryagain:
#ifndef AFS_FBSD80_ENV
        if (afs_globalVp) {
            afs_PutVCache(afs_globalVp);
            /* vrele() needed here or not? */
            afs_globalVp = NULL;
        }
#endif
        if (!(error = afs_InitReq(&treq, cr)) && !(error = afs_CheckInit())) {
            tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
            /* we really want this to stay around */
            if (tvp)
                afs_globalVp = tvp;
            else
                error = ENOENT;
        }
    }
    if (tvp) {
        struct vnode *vp = AFSTOV(tvp);

#ifdef AFS_FBSD50_ENV
        ASSERT_VI_UNLOCKED(vp, "afs_root");
#endif
        AFS_GUNLOCK();
        /*
         * I'm uncomfortable about this. Shouldn't this happen at a
         * higher level, and shouldn't we busy the top-level directory
         * to prevent recycling?
         */
#ifdef AFS_FBSD50_ENV
        error = vget(vp, LK_EXCLUSIVE | LK_RETRY, td);
        vp->v_vflag |= VV_ROOT;
#else
        error = vget(vp, LK_EXCLUSIVE | LK_RETRY, p);
        vp->v_flag |= VROOT;
#endif
        AFS_GLOCK();
        if (error != 0)
            goto tryagain;

        afs_globalVFS = mp;
        *vpp = vp;
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER,
               tvp ? AFSTOV(tvp) : NULL, ICL_TYPE_INT32, error);
    AFS_GUNLOCK();
    crfree(cr);
    return error;
}
afs_rmdir(OSI_VC_DECL(adp), char *aname, afs_ucred_t *acred)
#endif
{
    struct vrequest *treq = NULL;
    struct dcache *tdc;
    struct vcache *tvc = NULL;
    afs_int32 code;
    struct afs_conn *tc;
    afs_size_t offset, len;
    struct AFSFetchStatus OutDirStatus;
    struct AFSVolSync tsync;
    struct afs_fakestat_state fakestate;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_rmdir);
    afs_Trace2(afs_iclSetp, CM_TRACE_RMDIR, ICL_TYPE_POINTER, adp,
               ICL_TYPE_STRING, aname);

    if ((code = afs_CreateReq(&treq, acred)))
        goto done2;
    afs_InitFakeStat(&fakestate);

    if (strlen(aname) > AFSNAMEMAX) {
        code = ENAMETOOLONG;
        goto done;
    }

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&adp, &fakestate, treq);
    if (code)
        goto done;

    code = afs_VerifyVCache(adp, treq);
    if (code)
        goto done;

    /** If the volume is read-only, return an error without making an RPC to
     * the fileserver. */
    if (adp->f.states & CRO) {
        code = EROFS;
        goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        /* Disconnected read-only mode. */
        code = ENETDOWN;
        goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, treq, &offset, &len, 1);   /* test for error below */
    ObtainWriteLock(&adp->lock, 154);
    if (tdc)
        ObtainSharedLock(&tdc->lock, 633);
    if (tdc && (adp->f.states & CForeign)) {
        struct VenusFid unlinkFid;

        unlinkFid.Fid.Vnode = 0;
        code = afs_dir_Lookup(tdc, aname, &unlinkFid.Fid);
        if (code == 0) {
            afs_int32 cached = 0;

            unlinkFid.Cell = adp->f.fid.Cell;
            unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
            if (unlinkFid.Fid.Unique == 0) {
                tvc = afs_LookupVCache(&unlinkFid, treq, &cached, adp, aname);
            } else {
                ObtainReadLock(&afs_xvcache);
                tvc = afs_FindVCache(&unlinkFid, 0, 1 /* do xstats */ );
                ReleaseReadLock(&afs_xvcache);
            }
        }
    }

    if (!AFS_IS_DISCON_RW) {
        /* Not disconnected, can connect to server. */
        do {
            tc = afs_Conn(&adp->f.fid, treq, SHARED_LOCK, &rxconn);
            if (tc) {
                XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_REMOVEDIR);
                RX_AFS_GUNLOCK();
                code =
                    RXAFS_RemoveDir(rxconn, (struct AFSFid *)&adp->f.fid.Fid,
                                    aname, &OutDirStatus, &tsync);
                RX_AFS_GLOCK();
                XSTATS_END_TIME;
            } else
                code = -1;
        } while (afs_Analyze
                 (tc, rxconn, code, &adp->f.fid, treq,
                  AFS_STATS_FS_RPCIDX_REMOVEDIR, SHARED_LOCK, NULL));

        if (code) {
            if (tdc) {
                ReleaseSharedLock(&tdc->lock);
                afs_PutDCache(tdc);
            }
            if (code < 0) {
                afs_StaleVCache(adp);
            }
            ReleaseWriteLock(&adp->lock);
            goto done;
        }
        /* here if rpc worked; update the in-core link count */
        adp->f.m.LinkCount = OutDirStatus.LinkCount;
    } else {
        /* Disconnected. */

        if (!tdc) {
            ReleaseWriteLock(&adp->lock);
            /* printf("afs_rmdir: No local dcache!\n"); */
            code = ENETDOWN;
            goto done;
        }

        if (!tvc) {
            /* Find the vcache. */
            struct VenusFid tfid;

            tfid.Cell = adp->f.fid.Cell;
            tfid.Fid.Volume = adp->f.fid.Fid.Volume;
            code = afs_dir_Lookup(tdc, aname, &tfid.Fid);

            ObtainSharedLock(&afs_xvcache, 764);
            tvc = afs_FindVCache(&tfid, 0, 1 /* do xstats */ );
            ReleaseSharedLock(&afs_xvcache);

            if (!tvc) {
                /* printf("afs_rmdir: Can't find dir's vcache!\n"); */
                ReleaseSharedLock(&tdc->lock);
                afs_PutDCache(tdc);     /* drop ref count */
                ReleaseWriteLock(&adp->lock);
                code = ENETDOWN;
                goto done;
            }
        }

        if (tvc->f.m.LinkCount > 2) {
            /* This dir contains more than . and .., thus it can't be
             * deleted. */
            ReleaseSharedLock(&tdc->lock);
            afs_PutDCache(tdc);
            afs_PutVCache(tvc);
            ReleaseWriteLock(&adp->lock);
            code = ENOTEMPTY;
            goto done;
        }

        /* Make a shadow copy of the parent dir (if not done already).
         * If we were created locally, then we don't need a shadow
         * directory, as there's no server state to remember. */
        if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
            afs_MakeShadowDir(adp, tdc);
        }

        adp->f.m.LinkCount--;
    }                           /* if (!AFS_IS_DISCON_RW) */

    if (tdc)
        UpgradeSToWLock(&tdc->lock, 634);
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, &OutDirStatus, 1)) {
        /* we can do it locally */
        code = afs_dir_Delete(tdc, aname);
        if (code) {
            ZapDCE(tdc);        /* surprise error -- invalid value */
            DZap(tdc);
        }
    }
    if (tdc) {
        ReleaseWriteLock(&tdc->lock);
        afs_PutDCache(tdc);     /* drop ref count */
    }

    if (tvc)
        osi_dnlc_purgedp(tvc);  /* get rid of any entries for this directory */
    else
        osi_dnlc_remove(adp, aname, 0);

    if (tvc) {
        ObtainWriteLock(&tvc->lock, 155);
        tvc->f.states &= ~CUnique;      /* For the dfs xlator */
        if (AFS_IS_DISCON_RW) {
            if (tvc->f.ddirty_flags & VDisconCreate) {
                /* If we were created whilst disconnected, the removal
                 * doesn't need to get logged. Just go away gracefully. */
                afs_DisconRemoveDirty(tvc);
            } else {
                afs_DisconAddDirty(tvc, VDisconRemove, 1);
            }
        }
        ReleaseWriteLock(&tvc->lock);
        afs_PutVCache(tvc);
    }

    ReleaseWriteLock(&adp->lock);
    /* don't worry about link count since dirs cannot be hardlinked */
    code = 0;

  done:
    AFS_DISCON_UNLOCK();
    afs_PutFakeStat(&fakestate);
    code = afs_CheckCode(code, treq, 27);
    afs_DestroyReq(treq);
  done2:
    return code;
}
static int
afs_StoreMini(struct vcache *avc, struct vrequest *areq)
{
    struct afs_conn *tc;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus OutStatus;
    struct AFSVolSync tsync;
    afs_int32 code;
    struct rx_call *tcall;
    struct rx_connection *rxconn;
    afs_size_t tlen, xlen = 0;
    XSTATS_DECLS;

    AFS_STATCNT(afs_StoreMini);
    afs_Trace2(afs_iclSetp, CM_TRACE_STOREMINI, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->f.m.Length);
    tlen = avc->f.m.Length;
    if (avc->f.truncPos < tlen)
        tlen = avc->f.truncPos;
    avc->f.truncPos = AFS_NOTRUNC;
    avc->f.states &= ~CExtendedFile;

    do {
        tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
        if (tc) {
#ifdef AFS_64BIT_CLIENT
          retry:
#endif
            RX_AFS_GUNLOCK();
            tcall = rx_NewCall(rxconn);
            RX_AFS_GLOCK();
            /* Set the client mod time since we always want the file
             * to have the client's mod time and not the server's one
             * (to avoid problems with make, etc.). It almost always
             * works fine with standard afs because the server/client
             * times are in sync and, more importantly, this storemini
             * is a special call that would typically be followed by
             * the proper store-data or store-status calls. */
            InStatus.Mask = AFS_SETMODTIME;
            InStatus.ClientModTime = avc->f.m.Date;
            XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_STOREDATA);
            afs_Trace4(afs_iclSetp, CM_TRACE_STOREDATA64, ICL_TYPE_FID,
                       &avc->f.fid.Fid, ICL_TYPE_OFFSET,
                       ICL_HANDLE_OFFSET(avc->f.m.Length), ICL_TYPE_OFFSET,
                       ICL_HANDLE_OFFSET(xlen), ICL_TYPE_OFFSET,
                       ICL_HANDLE_OFFSET(tlen));
            RX_AFS_GUNLOCK();
#ifdef AFS_64BIT_CLIENT
            if (!afs_serverHasNo64Bit(tc)) {
                code =
                    StartRXAFS_StoreData64(tcall,
                                           (struct AFSFid *)&avc->f.fid.Fid,
                                           &InStatus, avc->f.m.Length,
                                           (afs_size_t) 0, tlen);
            } else {
                afs_int32 l1, l2;

                l1 = avc->f.m.Length;
                l2 = tlen;
                if ((avc->f.m.Length > 0x7fffffff) || (tlen > 0x7fffffff)
                    || ((0x7fffffff - tlen) < avc->f.m.Length)) {
                    code = EFBIG;
                    goto error;
                }
                code =
                    StartRXAFS_StoreData(tcall,
                                         (struct AFSFid *)&avc->f.fid.Fid,
                                         &InStatus, l1, 0, l2);
            }
#else /* AFS_64BIT_CLIENT */
            code =
                StartRXAFS_StoreData(tcall, (struct AFSFid *)&avc->f.fid.Fid,
                                     &InStatus, avc->f.m.Length, 0, tlen);
#endif /* AFS_64BIT_CLIENT */
            if (code == 0) {
                code = EndRXAFS_StoreData(tcall, &OutStatus, &tsync);
            }
#ifdef AFS_64BIT_CLIENT
          error:
#endif
            code = rx_EndCall(tcall, code);
            RX_AFS_GLOCK();
            XSTATS_END_TIME;
#ifdef AFS_64BIT_CLIENT
            if (code == RXGEN_OPCODE && !afs_serverHasNo64Bit(tc)) {
                afs_serverSetNo64Bit(tc);
                goto retry;
            }
#endif /* AFS_64BIT_CLIENT */
        } else
            code = -1;
    } while (afs_Analyze
             (tc, rxconn, code, &avc->f.fid, areq,
              AFS_STATS_FS_RPCIDX_STOREDATA, SHARED_LOCK, NULL));

    if (code == 0)
        afs_ProcessFS(avc, &OutStatus, areq);

    return code;
}                               /*afs_StoreMini */
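The 64-bit path above illustrates a capability-probing pattern used throughout the client: try the wider RPC first, and if the server answers RXGEN_OPCODE (rx's "unknown procedure" status), remember that per server and retry with the legacy call. A minimal stand-alone sketch of that control flow follows; try_store64 and try_store32 are hypothetical helpers standing in for the Start/End RPC pairs.

#include <stdbool.h>
#include <stdio.h>

#define RXGEN_OPCODE (-455)     /* rx's "unknown procedure" status */

static bool server_has_no_64bit;        /* per-server capability cache */

/* Hypothetical RPC stubs: the "server" here only speaks the old call. */
static int try_store64(long long len) { (void)len; return RXGEN_OPCODE; }
static int try_store32(int len) { (void)len; return 0; }

static int
store_with_fallback(long long len)
{
    int code;

  retry:
    if (!server_has_no_64bit) {
        code = try_store64(len);
        if (code == RXGEN_OPCODE) {
            /* Old server: note it once, then fall back to the 32-bit RPC. */
            server_has_no_64bit = true;
            goto retry;
        }
        return code;
    }
    if (len > 0x7fffffff)
        return -1;              /* EFBIG analogue: length not expressible */
    return try_store32((int)len);
}

int
main(void)
{
    printf("%d\n", store_with_fallback(1LL << 20));     /* 0 */
    return 0;
}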
afs_open(struct vcache **avcp, afs_int32 aflags, afs_ucred_t *acred)
#endif
{
    afs_int32 code;
    struct vrequest treq;
    struct vcache *tvc;
    int writing;
    struct afs_fakestat_state fakestate;

    AFS_STATCNT(afs_open);
    if ((code = afs_InitReq(&treq, acred)))
        return code;
#ifdef AFS_SGI64_ENV
    /* avcpp can be, but is not necessarily, bhp's vnode. */
    tvc = VTOAFS(BHV_TO_VNODE(bhv));
#else
    tvc = *avcp;
#endif

    afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc,
               ICL_TYPE_INT32, aflags);
    afs_InitFakeStat(&fakestate);

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&tvc, &fakestate, &treq);
    if (code)
        goto done;
    code = afs_VerifyVCache(tvc, &treq);
    if (code)
        goto done;

    ObtainReadLock(&tvc->lock);

    if (AFS_IS_DISCONNECTED && (afs_DCacheMissingChunks(tvc) != 0)) {
        ReleaseReadLock(&tvc->lock);
        /* printf("Network is down in afs_open: missing chunks\n"); */
        code = ENETDOWN;
        goto done;
    }

    ReleaseReadLock(&tvc->lock);

    if (aflags & (FWRITE | FTRUNC))
        writing = 1;
    else
        writing = 0;
    if (vType(tvc) == VDIR) {
        /* directory */
        if (writing) {
            code = EISDIR;
            goto done;
        } else {
            if (!afs_AccessOK
                (tvc,
                 ((tvc->f.states & CForeign) ? PRSFS_READ : PRSFS_LOOKUP),
                 &treq, CHECK_MODE_BITS)) {
                code = EACCES;
                /* printf("afs_Open: no access for dir\n"); */
                goto done;
            }
        }
    } else {
#ifdef AFS_SUN5_ENV
        if (AFS_NFSXLATORREQ(acred) && (aflags & FREAD)) {
            if (!afs_AccessOK
                (tvc, PRSFS_READ, &treq,
                 CHECK_MODE_BITS | CMB_ALLOW_EXEC_AS_READ)) {
                code = EACCES;
                goto done;
            }
        }
#endif
#ifdef AFS_AIX41_ENV
        if (aflags & FRSHARE) {
            /*
             * Hack for AIX 4.1:
             * Apparently it is possible for a file to get mapped without
             * either VNOP_MAP or VNOP_RDWR being called, if (1) it is a
             * sharable library, and (2) it has already been loaded. We must
             * ensure that the credp is up to date. We detect the situation
             * by checking for O_RSHARE at open time.
             */
            /*
             * We keep the caller's credentials since an async daemon will
             * handle the request at some point. We assume that the same
             * credentials will be used.
             */
            ObtainWriteLock(&tvc->lock, 140);
            if (!tvc->credp || (tvc->credp != acred)) {
                crhold(acred);
                if (tvc->credp) {
                    struct ucred *crp = tvc->credp;
                    tvc->credp = NULL;
                    crfree(crp);
                }
                tvc->credp = acred;
            }
            ReleaseWriteLock(&tvc->lock);
        }
#endif
        /* normal file or symlink */
        osi_FlushText(tvc);     /* only needed to flush text if text locked last time */
#ifdef AFS_BOZONLOCK_ENV
        afs_BozonLock(&tvc->pvnLock, tvc);
#endif
        osi_FlushPages(tvc, acred);
#ifdef AFS_BOZONLOCK_ENV
        afs_BozonUnlock(&tvc->pvnLock, tvc);
#endif
    }
    /* set date on file if open in O_TRUNC mode */
    if (aflags & FTRUNC) {
        /* this fixes touch */
        ObtainWriteLock(&tvc->lock, 123);
        tvc->f.m.Date = osi_Time();
        tvc->f.states |= CDirty;
        ReleaseWriteLock(&tvc->lock);
    }
    ObtainReadLock(&tvc->lock);
    if (writing)
        tvc->execsOrWriters++;
    tvc->opens++;
#if defined(AFS_SGI_ENV) || defined(AFS_LINUX26_ENV)
    if (writing && tvc->cred == NULL) {
        crhold(acred);
        tvc->cred = acred;
    }
#endif
    ReleaseReadLock(&tvc->lock);
    if ((afs_preCache != 0) && (writing == 0) && (vType(tvc) != VDIR)
        && (!afs_BBusy())) {
        struct dcache *tdc;
        afs_size_t offset, len;

        tdc = afs_GetDCache(tvc, 0, &treq, &offset, &len, 1);

        ObtainSharedLock(&tdc->mflock, 865);
        if (!(tdc->mflags & DFFetchReq)) {
            struct brequest *bp;
            /* start the daemon (may already be running, however) */
            UpgradeSToWLock(&tdc->mflock, 666);
            tdc->mflags |= DFFetchReq;  /* guaranteed to be cleared by BKG or GetDCache */
            /* last parm (1) tells bkg daemon to do an afs_PutDCache when it
             * is done, since we don't want to wait for it to finish before
             * doing so ourselves. */
            bp = afs_BQueue(BOP_FETCH, tvc, B_DONTWAIT, 0, acred,
                            (afs_size_t) 0, (afs_size_t) 1, tdc, (void *)0,
                            (void *)0);
            if (!bp) {
                tdc->mflags &= ~DFFetchReq;
            }
            ReleaseWriteLock(&tdc->mflock);
        } else {
            ReleaseSharedLock(&tdc->mflock);
        }
    }

  done:
    afs_PutFakeStat(&fakestate);
    AFS_DISCON_UNLOCK();

    code = afs_CheckCode(code, &treq, 4);       /* avoid AIX -O bug */

    afs_Trace2(afs_iclSetp, CM_TRACE_OPEN, ICL_TYPE_POINTER, tvc,
               ICL_TYPE_INT32, 999999);
    return code;
}
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;      /* don't truncate later */
    avc->f.states &= ~CExtendedFile;    /* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);        /* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
        osi_dnlc_purgedp(avc);

    /* Blow away pages; for now, only for Solaris */
#if (defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
        osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, 0);
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid))
                dcListMax++;
            afs_PutDCache(tdc);
        }
        index = afs_dvnextTbl[index];
    }

    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetDSlot(index, 0);
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
                /* same file? we'll zap it */
                if (afs_indexFlags[index] & IFDataMod) {
                    afs_stats_cmperf.cacheCurrDirtyChunks--;
                    /* don't write it back */
                    afs_indexFlags[index] &= ~IFDataMod;
                }
                afs_indexFlags[index] &= ~IFAnyPages;
                if (dcListCount < dcListMax)
                    dcList[dcListCount++] = tdc;
                else
                    afs_PutDCache(tdc);
            } else {
                afs_PutDCache(tdc);
            }
        }
        index = afs_dvnextTbl[index];
    }

    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
        tdc = dcList[i];

        ObtainWriteLock(&tdc->lock, 679);
        ZapDCE(tdc);
        if (vType(avc) == VDIR)
            DZap(tdc);
        ReleaseWriteLock(&tdc->lock);
        afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
int
afs_mkdir(OSI_VC_DECL(adp), char *aname, struct vattr *attrs,
          struct vcache **avcp, afs_ucred_t *acred)
{
    struct vrequest *treq = NULL;
    afs_int32 code;
    struct afs_conn *tc;
    struct rx_connection *rxconn;
    struct VenusFid newFid;
    struct dcache *tdc;
    struct dcache *new_dc;
    afs_size_t offset, len;
    struct vcache *tvc;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus *OutFidStatus, *OutDirStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    afs_int32 now;
    struct afs_fakestat_state fakestate;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_mkdir);
    afs_Trace2(afs_iclSetp, CM_TRACE_MKDIR, ICL_TYPE_POINTER, adp,
               ICL_TYPE_STRING, aname);

    OutFidStatus = osi_AllocSmallSpace(sizeof(struct AFSFetchStatus));
    OutDirStatus = osi_AllocSmallSpace(sizeof(struct AFSFetchStatus));
    memset(&InStatus, 0, sizeof(InStatus));

    if ((code = afs_CreateReq(&treq, acred)))
        goto done2;

    afs_InitFakeStat(&fakestate);

    if (strlen(aname) > AFSNAMEMAX) {
        code = ENAMETOOLONG;
        goto done3;
    }

    if (!afs_ENameOK(aname)) {
        code = EINVAL;
        goto done3;
    }

    AFS_DISCON_LOCK();

    code = afs_EvalFakeStat(&adp, &fakestate, treq);
    if (code)
        goto done;
    code = afs_VerifyVCache(adp, treq);
    if (code)
        goto done;

    /** If the volume is read-only, return an error without making an RPC to
     * the fileserver. */
    if (adp->f.states & CRO) {
        code = EROFS;
        goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        /*printf("Network is down in afs_mkdir\n");*/
        code = ENETDOWN;
        goto done;
    }

    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE | AFS_SETGROUP;
    InStatus.ClientModTime = osi_Time();
    InStatus.UnixModeBits = attrs->va_mode & 0xffff;    /* only care about protection bits */
    InStatus.Group = (afs_int32) afs_cr_gid(acred);
    tdc = afs_GetDCache(adp, (afs_size_t) 0, treq, &offset, &len, 1);
    ObtainWriteLock(&adp->lock, 153);

    if (!AFS_IS_DISCON_RW) {
        do {
            tc = afs_Conn(&adp->f.fid, treq, SHARED_LOCK, &rxconn);
            if (tc) {
                XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_MAKEDIR);
                now = osi_Time();
                RX_AFS_GUNLOCK();
                code =
                    RXAFS_MakeDir(rxconn, (struct AFSFid *)&adp->f.fid.Fid,
                                  aname, &InStatus,
                                  (struct AFSFid *)&newFid.Fid,
                                  OutFidStatus, OutDirStatus, &CallBack,
                                  &tsync);
                RX_AFS_GLOCK();
                XSTATS_END_TIME;
                CallBack.ExpirationTime += now;
                /* DON'T forget to set the callback value... */
            } else
                code = -1;
        } while (afs_Analyze
                 (tc, rxconn, code, &adp->f.fid, treq,
                  AFS_STATS_FS_RPCIDX_MAKEDIR, SHARED_LOCK, NULL));

        if (code) {
            if (code < 0) {
                afs_StaleVCache(adp);
            }
            ReleaseWriteLock(&adp->lock);
            if (tdc)
                afs_PutDCache(tdc);
            goto done;
        }
    } else {
        /* Disconnected. */

        /* We have the dir entry now, we can use it while disconnected. */
        if (adp->mvid.target_root == NULL) {
            /* If not a mount point, generate a new fid. */
            newFid.Cell = adp->f.fid.Cell;
            newFid.Fid.Volume = adp->f.fid.Fid.Volume;
            afs_GenFakeFid(&newFid, VDIR, 1);
        }
        /* XXX: If mount point??? */

        /* Operations with the actual dir's cache entry are further
         * down, where the dir entry gets created. */
    }                           /* if (!AFS_IS_DISCON_RW) */

    /* otherwise, we should see if we can make the change to the dir locally */
    if (tdc)
        ObtainWriteLock(&tdc->lock, 632);

    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, OutDirStatus, 1)) {
        /* we can do it locally */
        ObtainWriteLock(&afs_xdcache, 294);
        code = afs_dir_Create(tdc, aname, &newFid.Fid);
        ReleaseWriteLock(&afs_xdcache);
        if (code) {
            ZapDCE(tdc);        /* surprise error -- use invalid value */
            DZap(tdc);
        }
    }
    if (tdc) {
        ReleaseWriteLock(&tdc->lock);
        afs_PutDCache(tdc);
    }

    if (AFS_IS_DISCON_RW)
        /* We will have to settle for the local link count. */
        adp->f.m.LinkCount++;
    else
        adp->f.m.LinkCount = OutDirStatus->LinkCount;

    newFid.Cell = adp->f.fid.Cell;
    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
    ReleaseWriteLock(&adp->lock);

    if (AFS_IS_DISCON_RW) {
        /* When disconnected, we have to create the full dir here. */

        /* Generate a new vcache and fill it. */
        tvc = afs_NewVCache(&newFid, NULL);
        if (tvc) {
            *avcp = tvc;
        } else {
            code = EIO;
            goto done;
        }

        ObtainWriteLock(&tvc->lock, 738);
        afs_GenDisconStatus(adp, tvc, &newFid, attrs, treq, VDIR);
        ReleaseWriteLock(&tvc->lock);

        /* And now make an empty dir, containing . and .. : */
        /* Get a new dcache for it first. */
        new_dc = afs_GetDCache(tvc, (afs_size_t) 0, treq, &offset, &len, 1);
        if (!new_dc) {
            /* printf("afs_mkdir: can't get new dcache for dir.\n"); */
            code = EIO;
            goto done;
        }

        ObtainWriteLock(&afs_xdcache, 739);
        code =
            afs_dir_MakeDir(new_dc, (afs_int32 *) &newFid.Fid,
                            (afs_int32 *) &adp->f.fid.Fid);
        ReleaseWriteLock(&afs_xdcache);
        /* if (code) printf("afs_mkdir: afs_dirMakeDir code = %u\n", code); */

        afs_PutDCache(new_dc);

        ObtainWriteLock(&tvc->lock, 731);
        /* Update length in the vcache. */
        tvc->f.m.Length = new_dc->f.chunkBytes;
        afs_DisconAddDirty(tvc, VDisconCreate, 1);
        ReleaseWriteLock(&tvc->lock);
    } else {
        /* now we're done with parent dir, create the real dir's cache entry */
        tvc = afs_GetVCache(&newFid, treq, NULL, NULL);
        if (tvc) {
            code = 0;
            *avcp = tvc;
        } else {
            /* For some reason, we cannot fetch the vcache for our
             * newly-created dir. */
            code = EIO;
        }
    }                           /* if (AFS_DISCON_RW) */

  done:
    AFS_DISCON_UNLOCK();
  done3:
    afs_PutFakeStat(&fakestate);
    code = afs_CheckCode(code, treq, 26);
    afs_DestroyReq(treq);
  done2:
    osi_FreeSmallSpace(OutFidStatus);
    osi_FreeSmallSpace(OutDirStatus);
    return code;
}
/* Note that we don't set CDirty here; this is OK because the unlink
 * RPC is called synchronously. */
int
afs_remove(OSI_VC_DECL(adp), char *aname, afs_ucred_t *acred)
{
    struct vrequest treq;
    register struct dcache *tdc;
    struct VenusFid unlinkFid;
    register afs_int32 code;
    register struct vcache *tvc;
    afs_size_t offset, len;
    struct afs_fakestat_state fakestate;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_remove);
    afs_Trace2(afs_iclSetp, CM_TRACE_REMOVE, ICL_TYPE_POINTER, adp,
               ICL_TYPE_STRING, aname);

    if ((code = afs_InitReq(&treq, acred))) {
        return code;
    }

    afs_InitFakeStat(&fakestate);
    AFS_DISCON_LOCK();
    code = afs_EvalFakeStat(&adp, &fakestate, &treq);
    if (code)
        goto done;

    /* Check if this is dynroot */
    if (afs_IsDynroot(adp)) {
        code = afs_DynrootVOPRemove(adp, acred, aname);
        goto done;
    }
    if (afs_IsDynrootMount(adp)) {
        code = ENOENT;
        goto done;
    }

    if (strlen(aname) > AFSNAMEMAX) {
        code = ENAMETOOLONG;
        goto done;
    }
  tagain:
    code = afs_VerifyVCache(adp, &treq);
    tvc = NULL;
    if (code) {
        code = afs_CheckCode(code, &treq, 23);
        goto done;
    }

    /** If the volume is read-only, return an error without making an RPC to
     * the fileserver. */
    if (adp->f.states & CRO) {
        code = EROFS;
        goto done;
    }

    /* If we're running disconnected without logging, go no further... */
    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
        goto done;
    }

    tdc = afs_GetDCache(adp, (afs_size_t) 0, &treq, &offset, &len, 1);  /* test for error below */
    ObtainWriteLock(&adp->lock, 142);
    if (tdc)
        ObtainSharedLock(&tdc->lock, 638);

    /*
     * Make sure that the data in the cache is current. We may have
     * received a callback while we were waiting for the write lock.
     */
    if (!(adp->f.states & CStatd)
        || (tdc && !hsame(adp->f.m.DataVersion, tdc->f.versionNo))) {
        ReleaseWriteLock(&adp->lock);
        if (tdc) {
            ReleaseSharedLock(&tdc->lock);
            afs_PutDCache(tdc);
        }
        goto tagain;
    }

    unlinkFid.Fid.Vnode = 0;
    if (!tvc) {
        tvc = osi_dnlc_lookup(adp, aname, WRITE_LOCK);
    }

    /* This should not be necessary since afs_lookup() has already
     * done the work. */
    if (!tvc)
        if (tdc) {
            code = afs_dir_Lookup(tdc, aname, &unlinkFid.Fid);
            if (code == 0) {
                afs_int32 cached = 0;

                unlinkFid.Cell = adp->f.fid.Cell;
                unlinkFid.Fid.Volume = adp->f.fid.Fid.Volume;
                if (unlinkFid.Fid.Unique == 0) {
                    tvc =
                        afs_LookupVCache(&unlinkFid, &treq, &cached, adp,
                                         aname);
                } else {
                    ObtainReadLock(&afs_xvcache);
                    tvc = afs_FindVCache(&unlinkFid, 0, DO_STATS);
                    ReleaseReadLock(&afs_xvcache);
                }
            }
        }

    if (AFS_IS_DISCON_RW) {
        if (!adp->f.shadow.vnode && !(adp->f.ddirty_flags & VDisconCreate)) {
            /* Make shadow copy of parent dir. */
            afs_MakeShadowDir(adp, tdc);
        }

        /* Can't hold a dcache lock whilst we're getting a vcache one */
        if (tdc)
            ReleaseSharedLock(&tdc->lock);

        /* XXX - We're holding adp->lock still, and we've got no
         * guarantee about whether the ordering matches the lock hierarchy */
        ObtainWriteLock(&tvc->lock, 713);

        /* If we were locally created, then we don't need to do very
         * much beyond ensuring that we don't exist anymore */
        if (tvc->f.ddirty_flags & VDisconCreate) {
            afs_DisconRemoveDirty(tvc);
        } else {
            /* Add the removed file's vcache to the dirty list. */
            afs_DisconAddDirty(tvc, VDisconRemove, 1);
        }

        adp->f.m.LinkCount--;
        ReleaseWriteLock(&tvc->lock);
        if (tdc)
            ObtainSharedLock(&tdc->lock, 714);
    }

    if (tvc && osi_Active(tvc)) {
        /* about to delete whole file, prefetch it first */
        ReleaseWriteLock(&adp->lock);
        if (tdc)
            ReleaseSharedLock(&tdc->lock);
        ObtainWriteLock(&tvc->lock, 143);
        FetchWholeEnchilada(tvc, &treq);
        ReleaseWriteLock(&tvc->lock);
        ObtainWriteLock(&adp->lock, 144);
        /* Technically I don't think we need this back, but let's hold it
         * anyway; the "got" reference should actually be sufficient. */
        if (tdc)
            ObtainSharedLock(&tdc->lock, 640);
    }

    osi_dnlc_remove(adp, aname, tvc);

    Tadp1 = adp;
#ifndef AFS_DARWIN80_ENV
    Tadpr = VREFCOUNT(adp);
#endif
    Ttvc = tvc;
    Tnam = aname;
    Tnam1 = 0;
#ifndef AFS_DARWIN80_ENV
    if (tvc)
        Ttvcr = VREFCOUNT(tvc);
#endif
#ifdef AFS_AIX_ENV
    if (tvc && VREFCOUNT_GT(tvc, 2) && tvc->opens > 0
        && !(tvc->f.states & CUnlinked)) {
#else
    if (tvc && VREFCOUNT_GT(tvc, 1) && tvc->opens > 0
        && !(tvc->f.states & CUnlinked)) {
#endif
        char *unlname = afs_newname();

        ReleaseWriteLock(&adp->lock);
        if (tdc)
            ReleaseSharedLock(&tdc->lock);
        code = afsrename(adp, aname, adp, unlname, acred, &treq);
        Tnam1 = unlname;
        if (!code) {
            struct VenusFid *oldmvid = NULL;
            if (tvc->mvid)
                oldmvid = tvc->mvid;
            tvc->mvid = (struct VenusFid *)unlname;
            if (oldmvid)
                osi_FreeSmallSpace(oldmvid);

            crhold(acred);
            if (tvc->uncred) {
                crfree(tvc->uncred);
            }
            tvc->uncred = acred;
            tvc->f.states |= CUnlinked;

            /* if rename succeeded, remove should not */
            ObtainWriteLock(&tvc->lock, 715);
            if (tvc->f.ddirty_flags & VDisconRemove) {
                tvc->f.ddirty_flags &= ~VDisconRemove;
            }
            ReleaseWriteLock(&tvc->lock);
        } else {
            osi_FreeSmallSpace(unlname);
        }
        if (tdc)
            afs_PutDCache(tdc);
        afs_PutVCache(tvc);
    } else {
        code = afsremove(adp, tdc, tvc, aname, acred, &treq);
    }
  done:
    afs_PutFakeStat(&fakestate);
#ifndef AFS_DARWIN80_ENV
    /* we can't track by thread, it's not exported in the KPI; only do this
     * on !macos */
    osi_Assert(!WriteLocked(&adp->lock) || (adp->lock.pid_writer != MyPidxx));
#endif
    AFS_DISCON_UNLOCK();
    return code;
}


/* afs_remunlink -- This tries to delete the file at the server after it has
 * been renamed when unlinked locally but now has been finally released.
 *
 * CAUTION -- may be called with avc unheld. */
int
afs_remunlink(register struct vcache *avc, register int doit)
{
    afs_ucred_t *cred;
    char *unlname;
    struct vcache *adp;
    struct vrequest treq;
    struct VenusFid dirFid;
    register struct dcache *tdc;
    afs_int32 code = 0;

    if (NBObtainWriteLock(&avc->lock, 423))
        return 0;
#if defined(AFS_DARWIN80_ENV)
    if (vnode_get(AFSTOV(avc))) {
        ReleaseWriteLock(&avc->lock);
        return 0;
    }
#endif

    if (avc->mvid && (doit || (avc->f.states & CUnlinkedDel))) {
        if ((code = afs_InitReq(&treq, avc->uncred))) {
            ReleaseWriteLock(&avc->lock);
        } else {
            /* Must bump the refCount because GetVCache may block.
             * Also clear mvid so no other thread comes here if we block.
             */
            unlname = (char *)avc->mvid;
            avc->mvid = NULL;
            cred = avc->uncred;
            avc->uncred = NULL;

#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
            VREF(AFSTOV(avc));
#else
            AFS_FAST_HOLD(avc);
#endif

            /* We'll only try this once. If it fails, just release the vnode.
             * Clear after doing hold so that NewVCache doesn't find us yet.
             */
            avc->f.states &= ~(CUnlinked | CUnlinkedDel);

            ReleaseWriteLock(&avc->lock);

            dirFid.Cell = avc->f.fid.Cell;
            dirFid.Fid.Volume = avc->f.fid.Fid.Volume;
            dirFid.Fid.Vnode = avc->f.parent.vnode;
            dirFid.Fid.Unique = avc->f.parent.unique;
            adp = afs_GetVCache(&dirFid, &treq, NULL, NULL);

            if (adp) {
                tdc = afs_FindDCache(adp, (afs_size_t) 0);
                ObtainWriteLock(&adp->lock, 159);
                if (tdc)
                    ObtainSharedLock(&tdc->lock, 639);

                /* afsremove releases the adp & tdc locks, and does vn_rele(avc) */
                code = afsremove(adp, tdc, avc, unlname, cred, &treq);
                afs_PutVCache(adp);
            } else {
                /* we failed - and won't be back to try again. */
                afs_PutVCache(avc);
            }
            osi_FreeSmallSpace(unlname);
            crfree(cred);
        }
    } else {
#if defined(AFS_DARWIN80_ENV)
        vnode_put(AFSTOV(avc));
#endif
        ReleaseWriteLock(&avc->lock);
    }

    return code;
}
int
afs_getattr(OSI_VC_DECL(avc), struct vattr *attrs, afs_ucred_t *acred)
#endif
{
    afs_int32 code;
    struct vrequest *treq = NULL;
    struct unixuser *au;
    int inited = 0;
    OSI_VC_CONVERT(avc);

    AFS_STATCNT(afs_getattr);
    afs_Trace2(afs_iclSetp, CM_TRACE_GETATTR, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));

    if (afs_fakestat_enable && avc->mvstat == AFS_MVSTAT_MTPT) {
        struct afs_fakestat_state fakestat;
        struct vrequest *ureq = NULL;

        code = afs_CreateReq(&ureq, acred);
        if (code) {
            return code;
        }
        afs_InitFakeStat(&fakestat);
        code = afs_TryEvalFakeStat(&avc, &fakestat, ureq);
        if (code) {
            afs_PutFakeStat(&fakestat);
            afs_DestroyReq(ureq);
            return code;
        }
        code = afs_CopyOutAttrs(avc, attrs);
        afs_PutFakeStat(&fakestat);
        afs_DestroyReq(ureq);
        return code;
    }
#if defined(AFS_SUN5_ENV)
    if (flags & ATTR_HINT) {
        code = afs_CopyOutAttrs(avc, attrs);
        return code;
    }
#endif
#if defined(AFS_DARWIN_ENV) && !defined(AFS_DARWIN80_ENV)
    if (avc->f.states & CUBCinit) {
        code = afs_CopyOutAttrs(avc, attrs);
        return code;
    }
#endif

    AFS_DISCON_LOCK();

    if (afs_shuttingdown != AFS_RUNNING) {
        AFS_DISCON_UNLOCK();
        return EIO;
    }

    if (!(avc->f.states & CStatd)) {
        if (!(code = afs_CreateReq(&treq, acred))) {
            code = afs_VerifyVCache2(avc, treq);
            inited = 1;
        }
    } else
        code = 0;

#if defined(AFS_SUN5_ENV)
    if (code == 0)
        osi_FlushPages(avc, acred);
#endif

    if (code == 0) {
        osi_FlushText(avc);     /* only needed to flush text if text locked last time */
        code = afs_CopyOutAttrs(avc, attrs);

        if (afs_nfsexporter) {
            if (!inited) {
                if ((code = afs_CreateReq(&treq, acred))) {
                    return code;
                }
                inited = 1;
            }
            if (AFS_NFSXLATORREQ(acred)) {
                if ((vType(avc) != VDIR)
                    && !afs_AccessOK(avc, PRSFS_READ, treq,
                                     CHECK_MODE_BITS |
                                     CMB_ALLOW_EXEC_AS_READ)) {
                    afs_DestroyReq(treq);
                    return EACCES;
                }
            }
            if ((au = afs_FindUser(treq->uid, -1, READ_LOCK))) {
                struct afs_exporter *exporter = au->exporter;

                if (exporter && !(afs_nfsexporter->exp_states & EXP_UNIXMODE)) {
                    unsigned int ubits;
                    /*
                     * If the remote user wishes to enforce default Unix mode
                     * semantics, as in the nfs exporter case, we OR the user
                     * bits into the group and other bits. We need to do this
                     * because there is no RFS_ACCESS call and thus nfs
                     * clients implement nfs_access by interpreting the mode
                     * bits in the traditional way, which of course loses
                     * with afs. For example, mode 0644 is presented to such
                     * clients as 0666.
                     */
                    ubits = (attrs->va_mode & 0700) >> 6;
                    attrs->va_mode = attrs->va_mode | ubits | (ubits << 3);
                    /* If it's the root of AFS, replace the inode number with
                     * the inode number of the mounted-on directory;
                     * otherwise this confuses getwd()... */
#ifdef AFS_LINUX22_ENV
                    if (avc == afs_globalVp) {
                        struct inode *ip = AFSTOV(avc)->i_sb->s_root->d_inode;
                        attrs->va_nodeid = ip->i_ino;   /* VTOI()? */
                    }
#else
                    if (
#if defined(AFS_DARWIN_ENV)
                        vnode_isvroot(AFSTOV(avc))
#elif defined(AFS_NBSD50_ENV)
                        AFSTOV(avc)->v_vflag & VV_ROOT
#else
                        AFSTOV(avc)->v_flag & VROOT
#endif
                        ) {
                        struct vnode *vp = AFSTOV(avc);

#ifdef AFS_DARWIN80_ENV
                        /* XXX vp = vnode_mount(vp)->mnt_vnodecovered; */
                        vp = 0;
#else
                        vp = vp->v_vfsp->vfs_vnodecovered;
                        if (vp) {       /* Ignore weird failures */
#ifdef AFS_SGI62_ENV
                            attrs->va_nodeid = VnodeToIno(vp);
#else
                            struct inode *ip;

                            ip = (struct inode *)VTOI(vp);
                            if (ip)     /* Ignore weird failures */
                                attrs->va_nodeid = ip->i_number;
#endif
                        }
#endif
                    }
#endif /* AFS_LINUX22_ENV */
                }
                afs_PutUser(au, READ_LOCK);
            }
        }
int
afs_StoreAllSegments(struct vcache *avc, struct vrequest *areq, int sync)
{
    struct dcache *tdc;
    afs_int32 code = 0;
    afs_int32 index;
    afs_int32 origCBs, foreign = 0;
    int hash;
    afs_hyper_t newDV, oldDV;   /* DV when we start, and finish, respectively */
    struct dcache **dcList;
    unsigned int i, j, minj, moredata, high, off;
    afs_size_t maxStoredLength; /* highest offset we've written to server. */
    int safety, marineronce = 0;

    AFS_STATCNT(afs_StoreAllSegments);

    hset(oldDV, avc->f.m.DataVersion);
    hset(newDV, avc->f.m.DataVersion);
    hash = DVHash(&avc->f.fid);
    foreign = (avc->f.states & CForeign);
    dcList = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    afs_Trace2(afs_iclSetp, CM_TRACE_STOREALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
#if !defined(AFS_AIX32_ENV) && !defined(AFS_SGI65_ENV)
    /* In the aix vm implementation we need to do the vm_writep even
     * in the memcache case, since that's where we adjust the file's
     * size and finish flushing partial vm pages.
     */
    if ((cacheDiskType != AFS_FCACHE_TYPE_MEM) || (sync & AFS_VMSYNC_INVAL)
        || (sync & AFS_VMSYNC) || (sync & AFS_LASTSTORE))
#endif /* !AFS_AIX32_ENV && !AFS_SGI65_ENV */
    {
        /* If we're not diskless, reading a file may stress the VM
         * system enough to cause a pageout, and this vnode would be
         * locked when the pageout occurs. We can prevent this problem
         * by making sure all dirty pages are already flushed. We don't
         * do this when diskless because reading a diskless (i.e.
         * memory-resident) chunk doesn't require using new VM, and we
         * also don't want to dump more dirty data into a diskless cache,
         * since they're smaller, and we might exceed its available
         * space.
         */
#if defined(AFS_SUN5_ENV)
        if (sync & AFS_VMSYNC_INVAL)    /* invalidate VM pages */
            osi_VM_TryToSmush(avc, CRED(), 1);
        else
#endif
            osi_VM_StoreAllSegments(avc);
    }
    if (AFS_IS_DISCONNECTED && !AFS_IN_SYNC) {
        /* This will probably make someone sad ... */
        /*printf("Net down in afs_StoreSegments\n");*/
        return ENETDOWN;
    }
    ConvertWToSLock(&avc->lock);

    /*
     * Subsequent code expects a sorted list, and it expects all the
     * chunks in the list to be contiguous, so we need a sort and a
     * while loop in here, too - but this will work for a first pass...
     * 92.10.05 - OK, there's a sort in here now. It's kind of a modified
     *            bin sort, I guess. Chunk numbers start with 0
     *
     * - Have to get a write lock on xdcache because GetDSlot might need it (if
     *   the chunk doesn't have a dcache struct).
     *   This seems like overkill in most cases.
     * - I'm not sure that it's safe to do "index = .hvNextp", then unlock
     *   xdcache, then relock xdcache and try to use index. It is done
     *   a lot elsewhere in the CM, but I'm not buying that argument.
     * - should be able to check IFDataMod without doing the GetDSlot (just
     *   hold afs_xdcache). That way, it's easy to do this without the
     *   writelock on afs_xdcache, and we save unnecessary disk
     *   operations. I don't think that works, 'cuz the next pointers
     *   are still on disk.
     */
    origCBs = afs_allCBs;

    maxStoredLength = 0;
    minj = 0;

    do {
        memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));
        high = 0;
        moredata = FALSE;

        /* lock and start over from beginning of hash chain
         * in order to avoid a race condition. */
        ObtainWriteLock(&afs_xdcache, 284);
        index = afs_dvhashTbl[hash];
        for (j = 0; index != NULLIDX;) {
            if ((afs_indexFlags[index] & IFDataMod)
                && (afs_indexUnique[index] == avc->f.fid.Fid.Unique)) {
                tdc = afs_GetValidDSlot(index); /* refcount+1. */
                if (!tdc) {
                    ReleaseWriteLock(&afs_xdcache);
                    code = EIO;
                    goto done;
                }
                ReleaseReadLock(&tdc->tlock);
                if (!FidCmp(&tdc->f.fid, &avc->f.fid)
                    && tdc->f.chunk >= minj) {
                    off = tdc->f.chunk - minj;
                    if (off < NCHUNKSATONCE) {
                        if (dcList[off])
                            osi_Panic("dclist slot already in use!");
                        if (afs_mariner && !marineronce) {
                            /* first chunk only */
                            afs_MarinerLog("store$Storing", avc);
                            marineronce++;
                        }
                        dcList[off] = tdc;
                        if (off > high)
                            high = off;
                        j++;
                        /* DCLOCKXXX: chunkBytes is protected by tdc->lock
                         * which we can't grab here, due to lock ordering
                         * with afs_xdcache. So, disable this shortcut for
                         * now. -- kolya 2001-10-13 */
                        /* shortcut: big win for little files */
                        /* tlen -= tdc->f.chunkBytes;
                         * if (tlen <= 0)
                         *    break;
                         */
                    } else {
                        moredata = TRUE;
                        afs_PutDCache(tdc);
                        if (j == NCHUNKSATONCE)
                            break;
                    }
                } else {
                    afs_PutDCache(tdc);
                }
            }
            index = afs_dvnextTbl[index];
        }
        ReleaseWriteLock(&afs_xdcache);

        /* this guy writes chunks, puts back dcache structs, and bumps newDV */
        /* "moredata" just says "there are more dirty chunks yet to come". */
        if (j) {
            code =
                afs_CacheStoreVCache(dcList, avc, areq, sync, minj, high,
                                     moredata, &newDV, &maxStoredLength);
            /* Release any zero-length dcache entries in our interval
             * that we locked but didn't store back above. */
            for (j = 0; j <= high; j++) {
                tdc = dcList[j];
                if (tdc) {
                    osi_Assert(tdc->f.chunkBytes == 0);
                    ReleaseSharedLock(&tdc->lock);
                    afs_PutDCache(tdc);
                }
            }
        }
        /* if (j) */
        minj += NCHUNKSATONCE;
    } while (!code && moredata);

  done:
    UpgradeSToWLock(&avc->lock, 29);

    /* send a trivial truncation store if did nothing else */
    if (code == 0) {
        /*
         * Call StoreMini if we haven't written enough data to extend the
         * file at the fileserver to the client's notion of the file length.
         */
        if ((avc->f.truncPos != AFS_NOTRUNC)
            || ((avc->f.states & CExtendedFile)
                && (maxStoredLength < avc->f.m.Length))) {
            code = afs_StoreMini(avc, areq);
            if (code == 0)
                hadd32(newDV, 1);       /* just bumped here, too */
        }
        avc->f.states &= ~CExtendedFile;
    }

    /*
     * Finally, turn off DWriting, turn on DFEntryMod,
     * update f.versionNo.
     * A lot of this could be integrated into the loop above
     */
    if (!code) {
        afs_hyper_t h_unset;
        hones(h_unset);

        minj = 0;

        do {
            moredata = FALSE;
            memset(dcList, 0, NCHUNKSATONCE * sizeof(struct dcache *));

            /* overkill, but it gets the lock in case GetDSlot needs it */
            ObtainWriteLock(&afs_xdcache, 285);

            for (j = 0, safety = 0, index = afs_dvhashTbl[hash];
                 index != NULLIDX && safety < afs_cacheFiles + 2;
                 index = afs_dvnextTbl[index]) {

                if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
                    tdc = afs_GetValidDSlot(index);
                    if (!tdc) {
                        /* This is okay, since manipulating the dcaches at
                         * this point is best-effort. We only get a dcache
                         * here to increment the dv and turn off DWriting.
                         * If we were supposed to do that for a dcache, but
                         * could not due to an I/O error, it just means the
                         * dv won't be updated, so we won't be able to use
                         * that cached chunk in the future. That's
                         * inefficient, but not an error.
                         */
                        continue;
                    }
                    ReleaseReadLock(&tdc->tlock);

                    if (!FidCmp(&tdc->f.fid, &avc->f.fid)
                        && tdc->f.chunk >= minj) {
                        off = tdc->f.chunk - minj;
                        if (off < NCHUNKSATONCE) {
                            /* this is the file, and the correct chunk range */
                            if (j >= NCHUNKSATONCE)
                                osi_Panic
                                    ("Too many dcache entries in range\n");
                            dcList[j++] = tdc;
                        } else {
                            moredata = TRUE;
                            afs_PutDCache(tdc);
                            if (j == NCHUNKSATONCE)
                                break;
                        }
                    } else {
                        afs_PutDCache(tdc);
                    }
                }
            }
            ReleaseWriteLock(&afs_xdcache);

            for (i = 0; i < j; i++) {
                /* Iterate over the dcache entries we collected above */
                tdc = dcList[i];
                ObtainSharedLock(&tdc->lock, 677);

                /* was code here to clear IFDataMod, but it should only be
                 * done in storedcache and storealldcache. */
                /* Only increase DV if we had up-to-date data to start with.
                 * Otherwise, we could be falsely upgrading an old chunk
                 * (that we never read) into one labelled with the current
                 * DV #. Also note that we check that no intervening stores
                 * occurred, otherwise we might mislabel cache information
                 * for a chunk that we didn't store this time. */
                /* Don't update the version number if it's not yet set. */
                if (!hsame(tdc->f.versionNo, h_unset)
                    && hcmp(tdc->f.versionNo, oldDV) >= 0) {

                    if ((!(afs_dvhack || foreign)
                         && hsame(avc->f.m.DataVersion, newDV))
                        || ((afs_dvhack || foreign)
                            && (origCBs == afs_allCBs))) {
                        /* no error, this is the DV */

                        UpgradeSToWLock(&tdc->lock, 678);
                        hset(tdc->f.versionNo, avc->f.m.DataVersion);
                        tdc->dflags |= DFEntryMod;
                        /* DWriting may not have gotten cleared above, if all
                         * we did was a StoreMini */
                        tdc->f.states &= ~DWriting;
                        ConvertWToSLock(&tdc->lock);
                    }
                }

                ReleaseSharedLock(&tdc->lock);
                afs_PutDCache(tdc);
            }

            minj += NCHUNKSATONCE;

        } while (moredata);
    }

    if (code) {
        /*
         * Invalidate chunks after an error for ccores files since
         * afs_inactive won't be called for these and they won't be
         * invalidated. Also discard data if it's a permanent error from the
         * fileserver.
         */
        if (areq->permWriteError || (avc->f.states & CCore)) {
            afs_InvalidateAllSegments(avc);
        }
    }
    afs_Trace3(afs_iclSetp, CM_TRACE_STOREALLDONE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->f.m.Length, ICL_TYPE_INT32, code);
    /* would like a Trace5, but it doesn't exist... */
    afs_Trace3(afs_iclSetp, CM_TRACE_AVCLOCKER, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
               avc->lock.excl_locked);
    afs_Trace4(afs_iclSetp, CM_TRACE_AVCLOCKEE, ICL_TYPE_POINTER, avc,
               ICL_TYPE_INT32, avc->lock.wait_states, ICL_TYPE_INT32,
               avc->lock.readers_reading, ICL_TYPE_INT32,
               avc->lock.num_waiting);

    /*
     * Finally, if updated DataVersion matches newDV, we did all of the
     * stores. If mapDV indicates that the page cache was flushed up
     * to when we started the store, then we can relabel them as flushed
     * as recently as newDV.
     * Turn off the CDirty bit because the stored data is now in sync with
     * the server.
     */
    if (code == 0 && hcmp(avc->mapDV, oldDV) >= 0) {
        if ((!(afs_dvhack || foreign)
             && hsame(avc->f.m.DataVersion, newDV))
            || ((afs_dvhack || foreign) && (origCBs == afs_allCBs))) {
            hset(avc->mapDV, newDV);
            avc->f.states &= ~CDirty;
        }
    }

    osi_FreeLargeSpace(dcList);

    /* If not the final write a temporary error is ok. */
    if (code && !areq->permWriteError && !(sync & AFS_LASTSTORE))
        code = 0;

    return code;
}                               /*afs_StoreAllSegments (new 03/02/94) */
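The store loop above processes dirty chunks in windows of NCHUNKSATONCE: each pass over the (unsorted) hash chain keeps chunks whose number falls in [minj, minj + NCHUNKSATONCE), sets moredata whenever it sees a chunk beyond the window, then advances minj and repeats, which is the "modified bin sort" the comment mentions. A minimal stand-alone sketch of that windowing follows; the dirty array and the printed "store" are hypothetical stand-ins for the dcache hash chain and the RPC.

#include <stdbool.h>
#include <stdio.h>

#define NCHUNKSATONCE 4         /* window size; the real constant is larger */

/* Hypothetical: chunk numbers of dirty chunks, in hash-chain order. */
static const int dirty[] = { 7, 2, 0, 9, 5, 1, 11 };
static const int ndirty = sizeof(dirty) / sizeof(dirty[0]);

int
main(void)
{
    int minj = 0;
    bool moredata;

    do {
        int window[NCHUNKSATONCE];
        bool present[NCHUNKSATONCE] = { false };
        moredata = false;

        /* One pass over the unsorted chain: keep chunks inside the
         * current window, note that later windows still have work. */
        for (int i = 0; i < ndirty; i++) {
            int off = dirty[i] - minj;
            if (off < 0)
                continue;       /* already stored in an earlier window */
            if (off < NCHUNKSATONCE) {
                window[off] = dirty[i];
                present[off] = true;
            } else {
                moredata = true;
            }
        }

        /* "Store" the window in ascending chunk order. */
        for (int off = 0; off < NCHUNKSATONCE; off++)
            if (present[off])
                printf("store chunk %d\n", window[off]);

        minj += NCHUNKSATONCE;
    } while (moredata);
    return 0;
}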
/* don't set CDirty in here because the RPC is called synchronously */
int
afs_symlink(OSI_VC_DECL(adp), char *aname, struct vattr *attrs,
            char *atargetName, struct vcache **tvcp, afs_ucred_t *acred)
{
    afs_uint32 now = 0;
    struct vrequest *treq = NULL;
    afs_int32 code = 0;
    struct afs_conn *tc;
    struct VenusFid newFid;
    struct dcache *tdc;
    afs_size_t offset, len;
    afs_int32 alen;
    struct server *hostp = 0;
    struct vcache *tvc;
    struct AFSStoreStatus InStatus;
    struct AFSFetchStatus *OutFidStatus, *OutDirStatus;
    struct AFSCallBack CallBack;
    struct AFSVolSync tsync;
    struct volume *volp = 0;
    struct afs_fakestat_state fakestate;
    struct rx_connection *rxconn;
    XSTATS_DECLS;
    OSI_VC_CONVERT(adp);

    AFS_STATCNT(afs_symlink);
    afs_Trace2(afs_iclSetp, CM_TRACE_SYMLINK, ICL_TYPE_POINTER, adp,
               ICL_TYPE_STRING, aname);

    OutFidStatus = osi_AllocSmallSpace(sizeof(struct AFSFetchStatus));
    OutDirStatus = osi_AllocSmallSpace(sizeof(struct AFSFetchStatus));
    memset(&InStatus, 0, sizeof(InStatus));

    if ((code = afs_CreateReq(&treq, acred)))
        goto done2;

    afs_InitFakeStat(&fakestate);
    AFS_DISCON_LOCK();
    code = afs_EvalFakeStat(&adp, &fakestate, treq);
    if (code)
        goto done;

    if (strlen(aname) > AFSNAMEMAX || strlen(atargetName) > AFSPATHMAX) {
        code = ENAMETOOLONG;
        goto done;
    }

    if (afs_IsDynroot(adp)) {
        code = afs_DynrootVOPSymlink(adp, acred, aname, atargetName);
        goto done;
    }
    if (afs_IsDynrootMount(adp)) {
        code = EROFS;
        goto done;
    }

    code = afs_VerifyVCache(adp, treq);
    if (code) {
        code = afs_CheckCode(code, treq, 30);
        goto done;
    }

    /** If the volume is read-only, return an error without making an RPC to
     * the fileserver. */
    if (adp->f.states & CRO) {
        code = EROFS;
        goto done;
    }

    if (AFS_IS_DISCONNECTED && !AFS_IS_DISCON_RW) {
        code = ENETDOWN;
        goto done;
    }

    InStatus.Mask = AFS_SETMODTIME | AFS_SETMODE;
    InStatus.ClientModTime = osi_Time();
    alen = strlen(atargetName); /* we want it to include the null */
    if ((*atargetName == '#' || *atargetName == '%') && alen > 1
        && atargetName[alen - 1] == '.') {
        InStatus.UnixModeBits = 0644;   /* mt pt: null from "." at end */
        if (alen == 1)
            alen++;             /* Empty string */
    } else {
        InStatus.UnixModeBits = 0755;
        alen++;                 /* add in the null */
    }
    tdc = afs_GetDCache(adp, (afs_size_t) 0, treq, &offset, &len, 1);
    volp = afs_FindVolume(&adp->f.fid, READ_LOCK);      /*parent is also in same vol */
    ObtainWriteLock(&adp->lock, 156);
    if (tdc)
        ObtainWriteLock(&tdc->lock, 636);
    /* No further locks: if the SymLink succeeds, it does not matter what
     * happens to our local copy of the directory. If somebody tampers with
     * it in the meantime, the copy will be invalidated. */
    if (!AFS_IS_DISCON_RW) {
        do {
            tc = afs_Conn(&adp->f.fid, treq, SHARED_LOCK, &rxconn);
            if (tc) {
                hostp = tc->parent->srvr->server;
                XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SYMLINK);
                if (adp->f.states & CForeign) {
                    now = osi_Time();
                    RX_AFS_GUNLOCK();
                    code =
                        RXAFS_DFSSymlink(rxconn,
                                         (struct AFSFid *)&adp->f.fid.Fid,
                                         aname, atargetName, &InStatus,
                                         (struct AFSFid *)&newFid.Fid,
                                         OutFidStatus, OutDirStatus,
                                         &CallBack, &tsync);
                    RX_AFS_GLOCK();
                } else {
                    RX_AFS_GUNLOCK();
                    code =
                        RXAFS_Symlink(rxconn,
                                      (struct AFSFid *)&adp->f.fid.Fid,
                                      aname, atargetName, &InStatus,
                                      (struct AFSFid *)&newFid.Fid,
                                      OutFidStatus, OutDirStatus, &tsync);
                    RX_AFS_GLOCK();
                }
                XSTATS_END_TIME;
            } else
                code = -1;
        } while (afs_Analyze
                 (tc, rxconn, code, &adp->f.fid, treq,
                  AFS_STATS_FS_RPCIDX_SYMLINK, SHARED_LOCK, NULL));
    } else {
        newFid.Cell = adp->f.fid.Cell;
        newFid.Fid.Volume = adp->f.fid.Fid.Volume;
        afs_GenFakeFid(&newFid, VREG, 0);
    }

    ObtainWriteLock(&afs_xvcache, 40);
    if (code) {
        if (code < 0) {
            afs_StaleVCache(adp);
        }
        ReleaseWriteLock(&adp->lock);
        ReleaseWriteLock(&afs_xvcache);
        if (tdc) {
            ReleaseWriteLock(&tdc->lock);
            afs_PutDCache(tdc);
        }
        goto done;
    }
    /* otherwise, we should see if we can make the change to the dir locally */
    if (AFS_IS_DISCON_RW || afs_LocalHero(adp, tdc, OutDirStatus, 1)) {
        /* we can do it locally */
        ObtainWriteLock(&afs_xdcache, 293);
        /* If the following fails because the name has been created in the
         * meantime, the directory is out-of-date - the file server knows
         * best! */
        code = afs_dir_Create(tdc, aname, &newFid.Fid);
        ReleaseWriteLock(&afs_xdcache);
        if (code && !AFS_IS_DISCON_RW) {
            ZapDCE(tdc);        /* surprise error -- use invalid value */
            DZap(tdc);
        }
    }
    if (tdc) {
        ReleaseWriteLock(&tdc->lock);
        afs_PutDCache(tdc);
    }
    newFid.Cell = adp->f.fid.Cell;
    newFid.Fid.Volume = adp->f.fid.Fid.Volume;
    ReleaseWriteLock(&adp->lock);

    /* now we're done with the parent dir, create the link's entry. Note
     * that no one can get a pointer to the new cache entry until we
     * release the xvcache lock. */
    tvc = afs_NewVCache(&newFid, hostp);
    if (!tvc) {
        code = -2;
        ReleaseWriteLock(&afs_xvcache);
        goto done;
    }
    ObtainWriteLock(&tvc->lock, 157);
    ObtainWriteLock(&afs_xcbhash, 500);
    tvc->f.states |= CStatd;    /* have valid info */
    tvc->f.states &= ~CBulkFetching;

    if (adp->f.states & CForeign) {
        tvc->f.states |= CForeign;
        /* We don't have to worry about losing the callback since we're
         * doing it under the afs_xvcache lock. Actually, afs_NewVCache may
         * drop the afs_xvcache lock if it calls afs_FlushVCache. */
        tvc->cbExpires = CallBack.ExpirationTime + now;
        afs_QueueCallback(tvc, CBHash(CallBack.ExpirationTime), volp);
    } else {
        tvc->cbExpires = 0x7fffffff;    /* never expires, they can't change */
        /* since it never expires, we don't have to queue the callback */
    }
    ReleaseWriteLock(&afs_xcbhash);

    if (AFS_IS_DISCON_RW) {
        attrs->va_mode = InStatus.UnixModeBits;
        afs_GenDisconStatus(adp, tvc, &newFid, attrs, treq, VLNK);
        code = afs_DisconCreateSymlink(tvc, atargetName, treq);
        if (code) {
            /* XXX - When this goes wrong, we need to tidy up the changes we
             * made to the parent, and get rid of the vcache we just
             * created. */
            ReleaseWriteLock(&tvc->lock);
            ReleaseWriteLock(&afs_xvcache);
            afs_PutVCache(tvc);
            goto done;
        }
        afs_DisconAddDirty(tvc, VDisconCreate, 0);
    } else {
        afs_ProcessFS(tvc, OutFidStatus, treq);
    }

    if (!tvc->linkData) {
        tvc->linkData = afs_osi_Alloc(alen);
        osi_Assert(tvc->linkData != NULL);
        strncpy(tvc->linkData, atargetName, alen - 1);
        tvc->linkData[alen - 1] = 0;
    }
    ReleaseWriteLock(&tvc->lock);
    ReleaseWriteLock(&afs_xvcache);
    if (tvcp)
        *tvcp = tvc;
    else
        afs_PutVCache(tvc);
    code = 0;

  done:
    afs_PutFakeStat(&fakestate);
    if (volp)
        afs_PutVolume(volp, READ_LOCK);
    AFS_DISCON_UNLOCK();
    code = afs_CheckCode(code, treq, 31);
    afs_DestroyReq(treq);
  done2:
    osi_FreeSmallSpace(OutFidStatus);
    osi_FreeSmallSpace(OutDirStatus);
    return code;
}
int
afs_InvalidateAllSegments(struct vcache *avc)
{
    struct dcache *tdc;
    afs_int32 hash;
    afs_int32 index;
    struct dcache **dcList;
    int i, dcListMax, dcListCount;

    AFS_STATCNT(afs_InvalidateAllSegments);
    afs_Trace2(afs_iclSetp, CM_TRACE_INVALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length));
    hash = DVHash(&avc->f.fid);
    avc->f.truncPos = AFS_NOTRUNC;      /* don't truncate later */
    avc->f.states &= ~CExtendedFile;    /* not any more */
    ObtainWriteLock(&afs_xcbhash, 459);
    afs_DequeueCallback(avc);
    avc->f.states &= ~(CStatd | CDirty);        /* mark status information as bad, too */
    ReleaseWriteLock(&afs_xcbhash);
    if (avc->f.fid.Fid.Vnode & 1 || (vType(avc) == VDIR))
        osi_dnlc_purgedp(avc);

    /* Blow away pages; for now, only for Solaris */
#if (defined(AFS_SUN5_ENV))
    if (WriteLocked(&avc->lock))
        osi_ReleaseVM(avc, (afs_ucred_t *)0);
#endif
    /*
     * Block out others from screwing with this table; is a read lock
     * sufficient?
     */
    ObtainWriteLock(&afs_xdcache, 286);
    dcListMax = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetValidDSlot(index);
            if (!tdc) {
                /* In the case of fatal errors during stores, we MUST
                 * invalidate all of the relevant chunks. Otherwise, the
                 * chunks will be left with the 'new' data that was never
                 * successfully written to the server, but the DV in the
                 * dcache is still the old DV. So, we may indefinitely serve
                 * data to applications that is not actually in the file on
                 * the fileserver. If we cannot afs_GetValidDSlot the
                 * appropriate entries, currently there is no way to ensure
                 * the dcache is invalidated. So for now, to avoid risking
                 * serving bad data from the cache, panic instead. */
                osi_Panic("afs_InvalidateAllSegments tdc count");
            }
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid))
                dcListMax++;
            afs_PutDCache(tdc);
        }
        index = afs_dvnextTbl[index];
    }

    dcList = osi_Alloc(dcListMax * sizeof(struct dcache *));
    dcListCount = 0;

    for (index = afs_dvhashTbl[hash]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetValidDSlot(index);
            if (!tdc) {
                /* We cannot proceed after getting this error; we risk
                 * serving incorrect data to applications. So panic instead.
                 * See the comment next to the previous afs_GetValidDSlot
                 * call for details. */
                osi_Panic("afs_InvalidateAllSegments tdc store");
            }
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
                /* same file? we'll zap it */
                if (afs_indexFlags[index] & IFDataMod) {
                    afs_stats_cmperf.cacheCurrDirtyChunks--;
                    /* don't write it back */
                    afs_indexFlags[index] &= ~IFDataMod;
                }
                afs_indexFlags[index] &= ~IFAnyPages;
                if (dcListCount < dcListMax)
                    dcList[dcListCount++] = tdc;
                else
                    afs_PutDCache(tdc);
            } else {
                afs_PutDCache(tdc);
            }
        }
        index = afs_dvnextTbl[index];
    }

    ReleaseWriteLock(&afs_xdcache);

    for (i = 0; i < dcListCount; i++) {
        tdc = dcList[i];

        ObtainWriteLock(&tdc->lock, 679);
        ZapDCE(tdc);
        if (vType(avc) == VDIR)
            DZap(tdc);
        ReleaseWriteLock(&tdc->lock);
        afs_PutDCache(tdc);
    }

    osi_Free(dcList, dcListMax * sizeof(struct dcache *));

    return 0;
}
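Both versions of afs_InvalidateAllSegments walk the hash chain twice: a first pass only counts matching chunks so an exactly-sized array can be allocated, and a second pass collects them, with the bound on dcListCount guarding against entries that appear between the passes. A stand-alone sketch of that count-allocate-collect pattern follows, with a plain linked list standing in for the dcache hash chain; all names here are hypothetical.

#include <stdio.h>
#include <stdlib.h>

struct entry {
    int key;
    struct entry *next;
};

/* Collect all entries matching 'key' in two passes, as the
 * invalidation code does: count, allocate exactly, then gather. */
static struct entry **
collect(struct entry *head, int key, int *countp)
{
    int max = 0, n = 0;
    struct entry *e, **list;

    for (e = head; e; e = e->next)      /* pass 1: size the array */
        if (e->key == key)
            max++;

    list = malloc(max * sizeof(*list));
    for (e = head; e; e = e->next)      /* pass 2: fill it */
        if (e->key == key && n < max)   /* bound guards against races */
            list[n++] = e;

    *countp = n;
    return list;
}

int
main(void)
{
    struct entry c = { 1, NULL }, b = { 2, &c }, a = { 1, &b };
    int n;
    struct entry **list = collect(&a, 1, &n);

    printf("matched %d entries\n", n);  /* 2 */
    free(list);
    return 0;
}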
static int
VLDB_Same(struct VenusFid *afid, struct vrequest *areq)
{
    struct vrequest treq;
    struct afs_conn *tconn;
    int i, type = 0;
    union {
        struct vldbentry tve;
        struct nvldbentry ntve;
        struct uvldbentry utve;
    } *v;
    struct volume *tvp;
    struct cell *tcell;
    char *bp, tbuf[CVBS];       /* biggest volume id is 2^32, ~ 4*10^9 */
    unsigned int changed;
    struct server *(oldhosts[NMAXNSERVERS]);

    AFS_STATCNT(CheckVLDB);
    afs_FinalizeReq(areq);

    if ((i = afs_InitReq(&treq, afs_osi_credp)))
        return DUNNO;
    v = afs_osi_Alloc(sizeof(*v));
    tcell = afs_GetCell(afid->Cell, READ_LOCK);
    bp = afs_cv2string(&tbuf[CVBS], afid->Fid.Volume);
    do {
        VSleep(2);              /* Better safe than sorry. */
        tconn =
            afs_ConnByMHosts(tcell->cellHosts, tcell->vlport, tcell->cellNum,
                             &treq, SHARED_LOCK);
        if (tconn) {
            if (tconn->srvr->server->flags & SNO_LHOSTS) {
                type = 0;
                RX_AFS_GUNLOCK();
                i = VL_GetEntryByNameO(tconn->id, bp, &v->tve);
                RX_AFS_GLOCK();
            } else if (tconn->srvr->server->flags & SYES_LHOSTS) {
                type = 1;
                RX_AFS_GUNLOCK();
                i = VL_GetEntryByNameN(tconn->id, bp, &v->ntve);
                RX_AFS_GLOCK();
            } else {
                type = 2;
                RX_AFS_GUNLOCK();
                i = VL_GetEntryByNameU(tconn->id, bp, &v->utve);
                RX_AFS_GLOCK();
                if (!(tconn->srvr->server->flags & SVLSRV_UUID)) {
                    if (i == RXGEN_OPCODE) {
                        type = 1;
                        RX_AFS_GUNLOCK();
                        i = VL_GetEntryByNameN(tconn->id, bp, &v->ntve);
                        RX_AFS_GLOCK();
                        if (i == RXGEN_OPCODE) {
                            type = 0;
                            tconn->srvr->server->flags |= SNO_LHOSTS;
                            RX_AFS_GUNLOCK();
                            i = VL_GetEntryByNameO(tconn->id, bp, &v->tve);
                            RX_AFS_GLOCK();
                        } else if (!i)
                            tconn->srvr->server->flags |= SYES_LHOSTS;
                    } else if (!i)
                        tconn->srvr->server->flags |= SVLSRV_UUID;
                }
                lastcode = i;
            }
        } else
            i = -1;
    } while (afs_Analyze(tconn, i, NULL, &treq, -1,     /* no op code for this */
                         SHARED_LOCK, tcell));

    afs_PutCell(tcell, READ_LOCK);
    afs_Trace2(afs_iclSetp, CM_TRACE_CHECKVLDB, ICL_TYPE_FID, afid,
               ICL_TYPE_INT32, i);

    if (i) {
        afs_osi_Free(v, sizeof(*v));
        return DUNNO;
    }
    /* have info, copy into serverHost array */
    changed = 0;
    tvp = afs_FindVolume(afid, WRITE_LOCK);
    if (tvp) {
        ObtainWriteLock(&tvp->lock, 107);
        for (i = 0; i < NMAXNSERVERS && tvp->serverHost[i]; i++) {
            oldhosts[i] = tvp->serverHost[i];
        }

        if (type == 2) {
            InstallUVolumeEntry(tvp, &v->utve, afid->Cell, tcell, &treq);
        } else if (type == 1) {
            InstallNVolumeEntry(tvp, &v->ntve, afid->Cell);
        } else {
            InstallVolumeEntry(tvp, &v->tve, afid->Cell);
        }

        if (i < NMAXNSERVERS && tvp->serverHost[i]) {
            changed = 1;
        }
        for (--i; !changed && i >= 0; i--) {
            if (tvp->serverHost[i] != oldhosts[i]) {
                changed = 1;    /* also happens if prefs change. big deal. */
            }
        }

        ReleaseWriteLock(&tvp->lock);
        afs_PutVolume(tvp, WRITE_LOCK);
    } else {                    /* can't find volume */
        tvp = afs_GetVolume(afid, &treq, WRITE_LOCK);
        if (tvp) {
            afs_PutVolume(tvp, WRITE_LOCK);
            afs_osi_Free(v, sizeof(*v));
            return DIFFERENT;
        } else {
            afs_osi_Free(v, sizeof(*v));
            return DUNNO;
        }
    }
    afs_osi_Free(v, sizeof(*v));
    return (changed ? DIFFERENT : SAME);
}                               /*VLDB_Same */
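/*
 * Illustrative sketch (hypothetical names and flag values; not from the
 * original source): the U -> N -> O cascade above probes the newest VLDB RPC
 * first, treats RXGEN_OPCODE as "server does not implement this opcode",
 * and caches the outcome in per-server flags so later lookups go straight to
 * the right variant.  The generic shape of that negotiation:
 */
#define SKETCH_RXGEN_OPCODE (-455)   /* stand-in for the real RXGEN_OPCODE */

struct sketch_server {
    int flags;
#define SKETCH_ONLY_OLD 0x1          /* known to lack the newer RPC */
#define SKETCH_HAS_NEW  0x2          /* known to implement the newer RPC */
};

extern int call_new_rpc(struct sketch_server *sp);   /* hypothetical stubs */
extern int call_old_rpc(struct sketch_server *sp);

static int
lookup_with_fallback(struct sketch_server *sp)
{
    int code;

    if (sp->flags & SKETCH_ONLY_OLD)     /* capability already learned */
        return call_old_rpc(sp);

    code = call_new_rpc(sp);
    if (code == SKETCH_RXGEN_OPCODE) {
        sp->flags |= SKETCH_ONLY_OLD;    /* remember; never probe again */
        code = call_old_rpc(sp);
    } else if (code == 0) {
        sp->flags |= SKETCH_HAS_NEW;
    }
    return code;
}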
/**
 * Look up a volume by name in the given cell and set it up locally.
 *
 * @param aname Volume name.
 * @param acell Cell id.
 * @param agood Prefetch hint: if >= 2, the RO volume is set up as well; if
 *        == 3, the BK volume is too.
 * @param areq Request record; may be NULL if the caller does not need the
 *        ENODEV/ETIMEDOUT distinction.
 * @param locktype Type of lock to be used.
 *
 * @return Volume, or NULL on failure.
 */
static struct volume *
afs_NewVolumeByName(char *aname, afs_int32 acell, int agood,
                    struct vrequest *areq, afs_int32 locktype)
{
    afs_int32 code, type = 0;
    struct volume *tv, *tv1;
    struct vldbentry *tve;
    struct nvldbentry *ntve;
    struct uvldbentry *utve;
    struct cell *tcell;
    char *tbuffer, *ve;
    struct afs_conn *tconn;
    struct vrequest treq;
    struct rx_connection *rxconn;

    if (strlen(aname) > VL_MAXNAMELEN)  /* Invalid volume name */
        return NULL;

    tcell = afs_GetCell(acell, READ_LOCK);
    if (!tcell) {
        return NULL;
    }

    /* allow null request if we don't care about ENODEV/ETIMEDOUT distinction */
    if (!areq)
        areq = &treq;

    afs_Trace2(afs_iclSetp, CM_TRACE_GETVOL, ICL_TYPE_STRING, aname,
               ICL_TYPE_POINTER, aname);
    tbuffer = osi_AllocLargeSpace(AFS_LRALLOCSIZ);
    tve = (struct vldbentry *)(tbuffer + 1024);
    ntve = (struct nvldbentry *)tve;
    utve = (struct uvldbentry *)tve;

    afs_InitReq(&treq, afs_osi_credp);  /* *must* be unauth for vldb */
    do {
        tconn =
            afs_ConnByMHosts(tcell->cellHosts, tcell->vlport, tcell->cellNum,
                             &treq, SHARED_LOCK, 0, &rxconn);
        if (tconn) {
            if (tconn->srvr->server->flags & SNO_LHOSTS) {
                type = 0;
                RX_AFS_GUNLOCK();
                code = VL_GetEntryByNameO(rxconn, aname, tve);
                RX_AFS_GLOCK();
            } else if (tconn->srvr->server->flags & SYES_LHOSTS) {
                type = 1;
                RX_AFS_GUNLOCK();
                code = VL_GetEntryByNameN(rxconn, aname, ntve);
                RX_AFS_GLOCK();
            } else {
                type = 2;
                RX_AFS_GUNLOCK();
                code = VL_GetEntryByNameU(rxconn, aname, utve);
                RX_AFS_GLOCK();
                if (!(tconn->srvr->server->flags & SVLSRV_UUID)) {
                    if (code == RXGEN_OPCODE) {
                        type = 1;
                        RX_AFS_GUNLOCK();
                        code = VL_GetEntryByNameN(rxconn, aname, ntve);
                        RX_AFS_GLOCK();
                        if (code == RXGEN_OPCODE) {
                            type = 0;
                            tconn->srvr->server->flags |= SNO_LHOSTS;
                            RX_AFS_GUNLOCK();
                            code = VL_GetEntryByNameO(rxconn, aname, tve);
                            RX_AFS_GLOCK();
                        } else if (!code)
                            tconn->srvr->server->flags |= SYES_LHOSTS;
                    } else if (!code)
                        tconn->srvr->server->flags |= SVLSRV_UUID;
                }
                lastnvcode = code;
            }
        } else
            code = -1;
    } while (afs_Analyze(tconn, rxconn, code, NULL, &treq, -1,  /* no op code for this */
                         SHARED_LOCK, tcell));

    if (code) {
        /* If the client has yet to contact this cell and contact failed due
         * to network errors, mark the VLDB servers as back up.
         * That the client tried and failed can be determined from the
         * fact that there was a downtime incident, but CHasVolRef is not set.
         */
        /* RT 48959 - unclear if this should really go */
#if 0
        if (areq->networkError && !(tcell->states & CHasVolRef)) {
            int i;
            struct server *sp;
            struct srvAddr *sap;
            for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
                if ((sp = tcell->cellHosts[i]) == NULL)
                    break;
                for (sap = sp->addr; sap; sap = sap->next_sa)
                    afs_MarkServerUpOrDown(sap, 0);
            }
        }
#endif
        afs_CopyError(&treq, areq);
        osi_FreeLargeSpace(tbuffer);
        afs_PutCell(tcell, READ_LOCK);
        return NULL;
    }
    /*
     * Check to see if this cell has not yet referenced a volume.  If it
     * hasn't, it's just about to change its status, and we need to mark this
     * fact down.  Note that it is remotely possible that afs_SetupVolume
     * could fail and we would still not have a volume reference.
     */
    if (!(tcell->states & CHasVolRef)) {
        tcell->states |= CHasVolRef;
        afs_stats_cmperf.numCellsContacted++;
    }   /* First time a volume in this cell has been referenced */
    if (type == 2)
        ve = (char *)utve;
    else if (type == 1)
        ve = (char *)ntve;
    else
        ve = (char *)tve;
    tv = afs_SetupVolume(0, aname, ve, tcell, agood, type, &treq);
    if ((agood == 3) && tv && tv->backVol) {
        /*
         * This means that very soon we'll ask for the BK volume, so we'll
         * prefetch it (well, we did already).
         */
        tv1 = afs_SetupVolume(tv->backVol, (char *)0, ve, tcell, 0, type,
                              &treq);
        if (tv1) {
            tv1->refCount--;
        }
    }
    if ((agood >= 2) && tv && tv->roVol) {
        /*
         * This means that very soon we'll ask for the RO volume, so we'll
         * prefetch it (well, we did already).
         */
        tv1 = afs_SetupVolume(tv->roVol, NULL, ve, tcell, 0, type, &treq);
        if (tv1) {
            tv1->refCount--;
        }
    }
    osi_FreeLargeSpace(tbuffer);
    afs_PutCell(tcell, READ_LOCK);
    return tv;
}                               /*afs_NewVolumeByName */
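/*
 * Illustrative caller sketch (hypothetical function, disabled; not from the
 * original source): a lookup of a volume named "root.cell" with agood == 2
 * so the RO volume is prefetched alongside the RW one.  The caller owns one
 * reference on the result and drops it with afs_PutVolume when done.
 */
#if 0   /* sketch only: needs a live cell id and request from the caller */
static void
example_volume_lookup(afs_int32 acell, struct vrequest *areq)
{
    struct volume *tv =
        afs_NewVolumeByName("root.cell", acell, 2, areq, READ_LOCK);

    if (tv != NULL) {
        /* ... consult tv->serverHost[], tv->roVol, etc. ... */
        afs_PutVolume(tv, READ_LOCK);
    }
}
#endif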
#if defined(AFS_FBSD80_ENV)
static int
afs_root(struct mount *mp, int flags, struct vnode **vpp)
#elif defined(AFS_FBSD53_ENV)
static int
afs_root(struct mount *mp, int flags, struct vnode **vpp, struct thread *td)
#else
static int
afs_root(struct mount *mp, struct vnode **vpp)
#endif
{
    int error;
    struct vrequest treq;
    struct vcache *tvp = 0;
    struct vcache *gvp;
#if !defined(AFS_FBSD53_ENV) || defined(AFS_FBSD80_ENV)
    struct thread *td = curthread;
#endif
    struct ucred *cr = osi_curcred();

    AFS_GLOCK();
    AFS_STATCNT(afs_root);
    crhold(cr);
  tryagain:
    if (afs_globalVp && (afs_globalVp->f.states & CStatd)) {
        tvp = afs_globalVp;
        error = 0;
    } else {
        if (!(error = afs_InitReq(&treq, cr)) && !(error = afs_CheckInit())) {
            tvp = afs_GetVCache(&afs_rootFid, &treq, NULL, NULL);
            /* we really want this to stay around */
            if (tvp) {
                gvp = afs_globalVp;
                afs_globalVp = tvp;
                if (gvp) {
                    afs_PutVCache(gvp);
                    if (tvp != afs_globalVp) {
                        /* someone raced us and won */
                        afs_PutVCache(tvp);
                        goto tryagain;
                    }
                }
            } else
                error = ENOENT;
        }
    }
    if (tvp) {
        struct vnode *vp = AFSTOV(tvp);

        ASSERT_VI_UNLOCKED(vp, "afs_root");
        AFS_GUNLOCK();
        error = vget(vp, LK_EXCLUSIVE | LK_RETRY, td);
        AFS_GLOCK();
        /* we dropped the glock, so re-check everything it had serialized */
        if (!afs_globalVp || !(afs_globalVp->f.states & CStatd)
            || tvp != afs_globalVp) {
            vput(vp);
            afs_PutVCache(tvp);
            goto tryagain;
        }
        if (error != 0)
            goto tryagain;
        /*
         * I'm uncomfortable about this.  Shouldn't this happen at a higher
         * level, and shouldn't we busy the top-level directory to prevent
         * recycling?
         */
        vp->v_vflag |= VV_ROOT;

        afs_globalVFS = mp;
        *vpp = vp;
    }

    afs_Trace2(afs_iclSetp, CM_TRACE_VFSROOT, ICL_TYPE_POINTER,
               tvp ? AFSTOV(tvp) : NULL, ICL_TYPE_INT32, error);
    AFS_GUNLOCK();
    crfree(cr);
    return error;
}
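/*
 * Illustrative sketch (hypothetical names; not from the original source):
 * the shape of the tryagain loop above, generalized.  Whenever the routine
 * sleeps or drops its lock (afs_GetVCache, vget), another thread may have
 * replaced or invalidated the cached global, so it re-checks and restarts
 * rather than trusting stale state.
 */
struct obj;                                /* hypothetical cached object */
extern struct obj *global_cache;           /* shared; guarded by lock_g() */
extern int obj_valid(struct obj *o);       /* hypothetical validity test */
extern struct obj *fetch_obj(void);        /* slow path; returns a reference */
extern int pin_obj(struct obj *o);         /* may sleep, like vget() */
extern void unpin_obj(struct obj *o);
extern void lock_g(void);
extern void unlock_g(void);

static struct obj *
get_cached_obj(void)
{
    struct obj *o;

  tryagain:
    lock_g();
    if (global_cache && obj_valid(global_cache)) {
        o = global_cache;
    } else {
        o = fetch_obj();
        if (o == NULL) {
            unlock_g();
            return NULL;
        }
        global_cache = o;
    }
    unlock_g();

    if (pin_obj(o) != 0)
        goto tryagain;          /* pinning slept and failed; start over */

    lock_g();
    if (global_cache != o || !obj_valid(o)) {
        /* someone raced us while we slept; drop our pin and retry */
        unlock_g();
        unpin_obj(o);
        goto tryagain;
    }
    unlock_g();
    return o;
}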