/*!
 * Terminate the AFSDB handler, used on shutdown.
 *
 * If no handler thread ever registered, we record the shutdown ourselves
 * and advance the global termination state machine; otherwise we just wake
 * the handler so it can observe the shutdown and exit on its own.
 */
void
afs_StopAFSDB(void)
{
    if (!afsdb_handler_running) {
        /* No handler to notify: mark shutdown and move the termination
         * state machine along directly. */
        afsdb_handler_shutdown = 1;
        afs_termState = AFSOP_STOP_RXEVENT;
        afs_osi_Wakeup(&afs_termState);
        return;
    }
    /* Poke the sleeping handler thread; it performs the shutdown itself. */
    afs_osi_Wakeup(&afsdb_req);
}
/*!
 * Terminate the AFSDB handler, used on shutdown.
 *
 * If no handler thread ever registered, we record the shutdown ourselves and
 * advance the global termination state machine (the next state depends on
 * which RX wiring this platform uses); otherwise we wake the handler so it
 * can observe the shutdown and exit on its own.
 */
void
afs_StopAFSDB(void)
{
    if (!afsdb_handler_running) {
        /* No handler to notify: mark shutdown and advance termState. */
        afsdb_handler_shutdown = 1;
#if defined(AFS_SUN5_ENV) || defined(RXK_LISTENER_ENV) || defined(RXK_UPCALL_ENV)
        afs_termState = AFSOP_STOP_RXEVENT;
#else
        afs_termState = AFSOP_STOP_COMPLETE;
#endif
        afs_osi_Wakeup(&afs_termState);
        return;
    }
    /* Poke the sleeping handler thread; it performs the shutdown itself. */
    afs_osi_Wakeup(&afsdb_req);
}
/*!
 * Daemon loop that periodically probes file servers.
 *
 * Down servers are re-probed every afs_probe_interval seconds; all servers
 * are probed every afs_probe_all_interval seconds.  The loop exits when
 * afs_termState reaches AFSOP_STOP_CS, advancing the shutdown state machine
 * to AFSOP_STOP_TRUNCDAEMON.
 */
void
afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    /* Wait for basic client initialization to finish. */
    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);
    /* Initial staggered delay before the first probe pass. */
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
        /* Shutdown check before doing any work. */
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        now = osi_Time();
        if (afs_probe_interval + lastCheck <= now) {
            afs_CheckServers(1, NULL);  /* check down servers */
            /* Re-read the clock: the probe itself may take a while. */
            lastCheck = now = osi_Time();
        }

        if (afs_probe_all_interval + last10MinCheck <= now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32,
                       afs_probe_all_interval);
            afs_CheckServers(0, NULL);  /* probe all servers */
            last10MinCheck = now = osi_Time();
        }

        /* shutdown check. */
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        /* Compute time to next probe: the earlier of the two deadlines,
         * clamped to at least one second. */
        delay = afs_probe_interval + lastCheck;
        if (delay > afs_probe_all_interval + last10MinCheck)
            delay = afs_probe_all_interval + last10MinCheck;
        delay -= now;
        if (delay < 1)
            delay = 1;
        /* afs_osi_Wait takes milliseconds. */
        afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}
/*!
 * \brief Query the AFSDB handler and wait for response.
 * \param acellName
 * \return 0 for success. < 0 is error.
 */
static int
afs_GetCellHostsAFSDB(char *acellName)
{
    AFS_ASSERT_GLOCK();
    /* No userspace handler registered: lookups cannot be serviced. */
    if (!afsdb_handler_running)
        return ENOENT;

    /* afsdb_client_lock serializes clients so only one request occupies
     * the single afsdb_req slot at a time; afsdb_req_lock guards the
     * request fields themselves. */
    ObtainWriteLock(&afsdb_client_lock, 685);
    ObtainWriteLock(&afsdb_req_lock, 686);

    afsdb_req.cellname = acellName;
    afsdb_req.complete = 0;
    afsdb_req.pending = 1;

    /* Wake the handler thread sleeping in afs_AFSDBHandler. */
    afs_osi_Wakeup(&afsdb_req);
    ConvertWToRLock(&afsdb_req_lock);

    /* Drop the lock across each sleep so the handler can complete the
     * request; bail if the handler goes away. */
    while (afsdb_handler_running && !afsdb_req.complete) {
        ReleaseReadLock(&afsdb_req_lock);
        afs_osi_Sleep(&afsdb_req);
        ObtainReadLock(&afsdb_req_lock);
    };
    ReleaseReadLock(&afsdb_req_lock);
    ReleaseWriteLock(&afsdb_client_lock);

    /* The handler nulls cellname on a failed lookup. */
    if (afsdb_req.cellname) {
        return 0;
    } else
        return ENOENT;
}
void afs_Daemon(void) { afs_int32 now, last10MinCheck, last60MinCheck; last10MinCheck = 0; last60MinCheck = 0; while (1) { rx_CheckPackets(); now = osi_Time(); if (last10MinCheck + 600 < now) { afs_GCUserData(); } if (last60MinCheck + 3600 < now) { afs_int32 didany; afs_GCPAGs(&didany); } now = 20000 - (osi_Time() - now); afs_osi_Wait(now, &AFS_WaitHandler, 0); if (afs_termState == AFSOP_STOP_AFS) { #if defined(RXK_LISTENER_ENV) afs_termState = AFSOP_STOP_RXEVENT; #else afs_termState = AFSOP_STOP_COMPLETE; #endif afs_osi_Wakeup(&afs_termState); return; } } }
/*!
 * Background-daemon worker: store back a vcache's dirty data on last
 * reference.  Runs under the background request (brequest) framework;
 * on completion it publishes both the raw and checked return codes into
 * the request and wakes any waiter.
 */
static void
BStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest *treq = NULL;
#if defined(AFS_SGI_ENV)
    struct cred *tmpcred;
#endif

    AFS_STATCNT(BStore);
    /* If we cannot build a vrequest from the stored credentials there is
     * nothing we can do; the request is abandoned without a wakeup. */
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    tvc = ab->vc;
#if defined(AFS_SGI_ENV)
    /*
     * Since StoreOnLastReference can end up calling osi_SyncVM which
     * calls into VM code that assumes that u.u_cred has the
     * correct credentials, we set our to theirs for this xaction
     */
    tmpcred = OSI_GET_CURRENT_CRED();
    OSI_SET_CURRENT_CRED(ab->cred);

    /*
     * To avoid recursion since the WriteLock may be released during VM
     * operations, we hold the VOP_RWLOCK across this transaction as
     * do the other callers of StoreOnLastReference
     */
    AFS_RWLOCK((vnode_t *) tvc, 1);
#endif
    ObtainWriteLock(&tvc->lock, 209);
    code = afs_StoreOnLastReference(tvc, treq);
    ReleaseWriteLock(&tvc->lock);
#if defined(AFS_SGI_ENV)
    /* Restore the original credentials and drop the VOP lock. */
    OSI_SET_CURRENT_CRED(tmpcred);
    AFS_RWUNLOCK((vnode_t *) tvc, 1);
#endif
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
        /* To explain code_raw/code_checkcode:
         * Anyone that's waiting won't have our treq, so they won't be able to
         * call afs_CheckCode themselves on the return code we provide here.
         * But if we give back only the afs_CheckCode value, they won't know
         * what the "raw" value was. So give back both values, so the waiter
         * can know the "raw" value for interpreting the value internally, as
         * well as the afs_CheckCode value to give to the OS. */
        ab->code_raw = code;
        ab->code_checkcode = afs_CheckCode(code, treq, 430);

        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
    afs_DestroyReq(treq);
}
/*!
 * Queue a background request for the daemon pool.
 *
 * Finds a free slot in the static afs_brs table, fills it in, holds the
 * credentials and vcache, and wakes a waiting daemon.  If no slot is free:
 * returns NULL when dontwait is set, otherwise sleeps until one is released.
 * Returns the claimed request, or NULL.
 */
struct brequest *
afs_BQueue(short aopcode, struct vcache *avc, afs_int32 dontwait,
           afs_int32 ause, afs_ucred_t *acred, afs_size_t asparm0,
           afs_size_t asparm1, void *apparm0, void *apparm1, void *apparm2)
{
    int idx;
    struct brequest *slot;

    AFS_STATCNT(afs_BQueue);
    ObtainWriteLock(&afs_xbrs, 296);
    for (;;) {
        /* Scan the table for an unreferenced slot. */
        slot = NULL;
        for (idx = 0; idx < NBRS; idx++) {
            if (afs_brs[idx].refCount == 0) {
                slot = &afs_brs[idx];
                break;
            }
        }
        if (slot != NULL) {
            /* Claim the slot and populate the request. */
            slot->opcode = aopcode;
            slot->vc = avc;
            slot->cred = acred;
            if (slot->cred)
                crhold(slot->cred);
            if (avc)
                AFS_FAST_HOLD(avc);
            /* One reference per requested user plus one for the daemon. */
            slot->refCount = ause + 1;
            slot->size_parm[0] = asparm0;
            slot->size_parm[1] = asparm1;
            slot->ptr_parm[0] = apparm0;
            slot->ptr_parm[1] = apparm1;
            slot->ptr_parm[2] = apparm2;
            slot->flags = 0;
            slot->code_raw = slot->code_checkcode = 0;
            slot->ts = afs_brs_count++;
            /* if daemons are waiting for work, wake them up */
            if (afs_brsDaemons > 0)
                afs_osi_Wakeup(&afs_brsDaemons);
            ReleaseWriteLock(&afs_xbrs);
            return slot;
        }
        if (dontwait) {
            ReleaseWriteLock(&afs_xbrs);
            return NULL;
        }
        /* no free buffers, sleep a while */
        afs_brsWaiters++;
        ReleaseWriteLock(&afs_xbrs);
        afs_osi_Sleep(&afs_brsWaiters);
        ObtainWriteLock(&afs_xbrs, 301);
        afs_brsWaiters--;
    }
}
/* cancel osi_Wait */ void afs_osi_CancelWait(struct afs_osi_WaitHandle *achandle) { caddr_t proc; AFS_STATCNT(osi_CancelWait); proc = achandle->proc; if (proc == 0) return; achandle->proc = (caddr_t) 0; /* so dude can figure out he was signalled */ afs_osi_Wakeup(&waitV); }
/* release a held request buffer */
void
afs_BRelease(struct brequest *ab)
{
    AFS_STATCNT(afs_BRelease);
    ObtainWriteLock(&afs_xbrs, 294);
    ab->refCount--;
    if (ab->refCount <= 0) {
        /* Last reference gone: reset flags so the slot can be reused. */
        ab->flags = 0;
    }
    /* Someone may be blocked in afs_BQueue waiting for a free slot. */
    if (afs_brsWaiters != 0)
        afs_osi_Wakeup(&afs_brsWaiters);
    ReleaseWriteLock(&afs_xbrs);
}
static void BStore(struct brequest *ab) { struct vcache *tvc; afs_int32 code; struct vrequest treq; #if defined(AFS_SGI_ENV) struct cred *tmpcred; #endif AFS_STATCNT(BStore); if ((code = afs_InitReq(&treq, ab->cred))) return; code = 0; tvc = ab->vc; #if defined(AFS_SGI_ENV) /* * Since StoreOnLastReference can end up calling osi_SyncVM which * calls into VM code that assumes that u.u_cred has the * correct credentials, we set our to theirs for this xaction */ tmpcred = OSI_GET_CURRENT_CRED(); OSI_SET_CURRENT_CRED(ab->cred); /* * To avoid recursion since the WriteLock may be released during VM * operations, we hold the VOP_RWLOCK across this transaction as * do the other callers of StoreOnLastReference */ AFS_RWLOCK((vnode_t *) tvc, 1); #endif ObtainWriteLock(&tvc->lock, 209); code = afs_StoreOnLastReference(tvc, &treq); ReleaseWriteLock(&tvc->lock); #if defined(AFS_SGI_ENV) OSI_SET_CURRENT_CRED(tmpcred); AFS_RWUNLOCK((vnode_t *) tvc, 1); #endif /* now set final return code, and wakeup anyone waiting */ if ((ab->flags & BUVALID) == 0) { ab->code = afs_CheckCode(code, &treq, 43); /* set final code, since treq doesn't go across processes */ ab->flags |= BUVALID; if (ab->flags & BUWAIT) { ab->flags &= ~BUWAIT; afs_osi_Wakeup(ab); } } }
/*!
 * Release a reference on the shared Darwin vfs_context.
 *
 * Acquires the GLOCK if not already held.  Once the permanent context is
 * initialized this is a no-op; otherwise it clears ownership if we own it,
 * drops the reference count, and wakes anyone waiting on the context.
 */
void
put_vfs_context(void)
{
    int had_glock = ISAFS_GLOCK();

    if (!had_glock)
        AFS_GLOCK();
    if (!afs_osi_ctxtp_initialized) {
        if (vfs_context_owner == current_thread())
            vfs_context_owner = (thread_t)0;
        vfs_context_ref--;
        afs_osi_Wakeup(&afs_osi_ctxtp);
    }
    if (!had_glock)
        AFS_GUNLOCK();
}
/* size_parm 0 to the fetch is the chunk number,
 * ptr_parm 0 is the dcache entry to wakeup,
 * size_parm 1 is true iff we should release the dcache entry here.
 */
static void
BPrefetch(struct brequest *ab)
{
    struct dcache *tdc;
    struct vcache *tvc;
    afs_size_t offset, len, abyte, totallen = 0;
    struct vrequest *treq = NULL;
    int code;

    AFS_STATCNT(BPrefetch);
    /* No vrequest means we cannot talk to the fileserver at all. */
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    abyte = ab->size_parm[0];
    tvc = ab->vc;
    /* Fetch successive chunks until afs_preCache bytes are cached, the
     * fetch fails, or a zero-length chunk signals end of data. */
    do {
        tdc = afs_GetDCache(tvc, abyte, treq, &offset, &len, 1);
        if (tdc) {
            afs_PutDCache(tdc);
        }
        abyte += len;
        totallen += len;
    } while ((totallen < afs_preCache) && tdc && (len > 0));
    /* now, dude may be waiting for us to clear DFFetchReq bit; do so.  Can't
     * use tdc from GetDCache since afs_GetDCache may fail, but someone may
     * be waiting for our wakeup anyway.
     */
    tdc = (struct dcache *)(ab->ptr_parm[0]);
    ObtainSharedLock(&tdc->lock, 640);
    if (tdc->mflags & DFFetchReq) {
        /* Upgrade only when we actually need to clear the flag. */
        UpgradeSToWLock(&tdc->lock, 641);
        tdc->mflags &= ~DFFetchReq;
        ReleaseWriteLock(&tdc->lock);
    } else {
        ReleaseSharedLock(&tdc->lock);
    }
    /* Waiters for this chunk sleep on the validPos field. */
    afs_osi_Wakeup(&tdc->validPos);
    if (ab->size_parm[1]) {
        afs_PutDCache(tdc);     /* put this one back, too */
    }
    afs_DestroyReq(treq);
}
/*!
 * Background-daemon worker: asynchronously store back all dirty segments
 * of a vcache (partial-store request).  Publishes raw and checked return
 * codes into the request and wakes any waiter.
 */
static void
BPartialStore(struct brequest *ab)
{
    struct vcache *tvc;
    afs_int32 code;
    struct vrequest *treq = NULL;
    int locked, shared_locked = 0;

    AFS_STATCNT(BStore);
    /* Abandon the request if we cannot build a vrequest. */
    if ((code = afs_CreateReq(&treq, ab->cred)))
        return;
    tvc = ab->vc;

    /* The caller may already hold the vcache lock in some mode; only take
     * the write lock ourselves when nobody holds it at all. */
    locked = tvc->lock.excl_locked? 1:0;
    if (!locked)
        ObtainWriteLock(&tvc->lock, 1209);
    else if (!(tvc->lock.excl_locked & WRITE_LOCK)) {
        shared_locked = 1;
        ConvertSToRLock(&tvc->lock);
    }
    code = afs_StoreAllSegments(tvc, treq, AFS_ASYNC);
    if (!locked)
        ReleaseWriteLock(&tvc->lock);
    else if (shared_locked)
        /* NOTE(review): this second ConvertSToRLock looks suspicious —
         * the lock was already converted S->R above, so one might expect
         * the reverse conversion here; this matches the code as written,
         * confirm against the lock package's semantics. */
        ConvertSToRLock(&tvc->lock);
    /* now set final return code, and wakeup anyone waiting */
    if ((ab->flags & BUVALID) == 0) {
        /* set final code, since treq doesn't go across processes */
        ab->code_raw = code;
        ab->code_checkcode = afs_CheckCode(code, treq, 43);
        ab->flags |= BUVALID;
        if (ab->flags & BUWAIT) {
            ab->flags &= ~BUWAIT;
            afs_osi_Wakeup(ab);
        }
    }
    afs_DestroyReq(treq);
}
/*
 * afs_TruncateAllSegments
 *
 * Description:
 *	Truncate a cache file.
 *
 * Parameters:
 *	avc  : Ptr to vcache entry to truncate.
 *	alen : Number of bytes to make the file.
 *	areq : Ptr to request structure.
 *
 * Environment:
 *	Called with avc write-locked; in VFS40 systems, pvnLock is also
 *	held.
 */
int
afs_TruncateAllSegments(struct vcache *avc, afs_size_t alen,
                        struct vrequest *areq, afs_ucred_t *acred)
{
    struct dcache *tdc;
    afs_int32 code;
    afs_int32 index;
    afs_size_t newSize;

    int dcCount, dcPos;
    struct dcache **tdcArray = NULL;

    AFS_STATCNT(afs_TruncateAllSegments);
    avc->f.m.Date = osi_Time();
    afs_Trace3(afs_iclSetp, CM_TRACE_TRUNCALL, ICL_TYPE_POINTER, avc,
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(avc->f.m.Length),
               ICL_TYPE_OFFSET, ICL_HANDLE_OFFSET(alen));
    if (alen >= avc->f.m.Length) {
        /*
         * Special speedup since Sun's vm extends the file this way;
         * we've never written to the file thus we can just set the new
         * length and avoid the needless calls below.
         * Also used for ftruncate calls which can extend the file.
         * To completely minimize the possible extra StoreMini RPC, we really
         * should keep the ExtendedPos as well and clear this flag if we
         * truncate below that value before we store the file back.
         */
        avc->f.states |= CExtendedFile;
        avc->f.m.Length = alen;
        return 0;
    }
#if	(defined(AFS_SUN5_ENV))

    /* Zero unused portion of last page */
    osi_VM_PreTruncate(avc, alen, acred);

#endif

#if	(defined(AFS_SUN5_ENV))
    /* Block new getpages while we truncate the on-disk chunks. */
    ObtainWriteLock(&avc->vlock, 546);
    avc->activeV++;		/* Block new getpages */
    ReleaseWriteLock(&avc->vlock);
#endif

    /* Drop the vcache lock and GLOCK so the VM system can flush pages
     * without deadlocking against us. */
    ReleaseWriteLock(&avc->lock);
    AFS_GUNLOCK();

    /* Flush pages beyond end-of-file. */
    osi_VM_Truncate(avc, alen, acred);

    AFS_GLOCK();
    ObtainWriteLock(&avc->lock, 79);

    avc->f.m.Length = alen;

    if (alen < avc->f.truncPos)
        avc->f.truncPos = alen;
    code = DVHash(&avc->f.fid);

    /* block out others from screwing with this table */
    ObtainWriteLock(&afs_xdcache, 287);

    /* First pass: count dcache entries belonging to this file so we can
     * size the holding array. */
    dcCount = 0;
    for (index = afs_dvhashTbl[code]; index != NULLIDX;) {
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetValidDSlot(index);
            if (!tdc) {
                ReleaseWriteLock(&afs_xdcache);
                code = EIO;
                goto done;
            }
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid))
                dcCount++;
            afs_PutDCache(tdc);
        }
        index = afs_dvnextTbl[index];
    }

    /* Now allocate space where we can save those dcache entries, and
     * do a second pass over them..  Since we're holding xdcache, it
     * shouldn't be changing.
     */
    tdcArray = osi_Alloc(dcCount * sizeof(struct dcache *));
    dcPos = 0;

    for (index = afs_dvhashTbl[code]; index != NULLIDX; index = afs_dvnextTbl[index]) {
        if (afs_indexUnique[index] == avc->f.fid.Fid.Unique) {
            tdc = afs_GetValidDSlot(index);
            if (!tdc) {
                /* make sure we put back all of the tdcArray members before
                 * bailing out */
                /* remember, the last valid tdc is at dcPos-1, so start at
                 * dcPos-1, not at dcPos itself. */
                for (dcPos = dcPos - 1; dcPos >= 0; dcPos--) {
                    tdc = tdcArray[dcPos];
                    afs_PutDCache(tdc);
                }
                code = EIO;
                goto done;
            }
            ReleaseReadLock(&tdc->tlock);
            if (!FidCmp(&tdc->f.fid, &avc->f.fid)) {
                /* same file, and modified, we'll store it back */
                if (dcPos < dcCount) {
                    tdcArray[dcPos++] = tdc;
                } else {
                    afs_PutDCache(tdc);
                }
            } else {
                afs_PutDCache(tdc);
            }
        }
    }

    ReleaseWriteLock(&afs_xdcache);

    /* Now we loop over the array of dcache entries and truncate them */
    for (index = 0; index < dcPos; index++) {
        struct osi_file *tfile;

        tdc = tdcArray[index];

        newSize = alen - AFS_CHUNKTOBASE(tdc->f.chunk);
        /* NOTE(review): if afs_size_t is unsigned this comparison is always
         * false and the clamp never fires — confirm afs_size_t's
         * signedness on all supported platforms. */
        if (newSize < 0)
            newSize = 0;
        ObtainSharedLock(&tdc->lock, 672);
        if (newSize < tdc->f.chunkBytes && newSize < MAX_AFS_UINT32) {
            UpgradeSToWLock(&tdc->lock, 673);
            tdc->f.states |= DWriting;
            tfile = afs_CFileOpen(&tdc->f.inode);
            afs_CFileTruncate(tfile, (afs_int32)newSize);
            afs_CFileClose(tfile);
            afs_AdjustSize(tdc, (afs_int32)newSize);
            if (alen < tdc->validPos) {
                if (alen < AFS_CHUNKTOBASE(tdc->f.chunk))
                    tdc->validPos = 0;
                else
                    tdc->validPos = alen;
            }
            ConvertWToSLock(&tdc->lock);
        }
        ReleaseSharedLock(&tdc->lock);
        afs_PutDCache(tdc);
    }

    code = 0;

 done:
    if (tdcArray) {
        osi_Free(tdcArray, dcCount * sizeof(struct dcache *));
    }
#if	(defined(AFS_SUN5_ENV))
    /* Unblock getpages and wake anyone waiting for revocation. */
    ObtainWriteLock(&avc->vlock, 547);
    if (--avc->activeV == 0 && (avc->vstates & VRevokeWait)) {
        avc->vstates &= ~VRevokeWait;
        afs_osi_Wakeup((char *)&avc->vstates);
    }
    ReleaseWriteLock(&avc->vlock);
#endif

    return code;
}
/*!
 * \brief Entry point for user-space AFSDB request handler.
 * Reads cell data from kerlenMsg and add new cell, or alias.
 * \param acellName Cell name. If a cell is found, it's name will be filled in here.
 * \param acellNameLen Cell name length.
 * \param kernelMsg Buffer containing data about host count, time out, and cell hosts ids.
 * \return 0 for success, < 0 for error.
 */
int
afs_AFSDBHandler(char *acellName, int acellNameLen, afs_int32 * kernelMsg)
{
    afs_int32 timeout, code;
    afs_int32 cellHosts[AFS_MAXCELLHOSTS];

    if (afsdb_handler_shutdown)
        return -2;
    afsdb_handler_running = 1;

    ObtainSharedLock(&afsdb_req_lock, 683);
    /* First, complete the previous lookup (if any) with the answer the
     * userspace handler just delivered. */
    if (afsdb_req.pending) {
        int i, hostCount;

        UpgradeSToWLock(&afsdb_req_lock, 684);
        hostCount = kernelMsg[0];
        timeout = kernelMsg[1];
        if (timeout)
            timeout += osi_Time();

        for (i = 0; i < AFS_MAXCELLHOSTS; i++) {
            if (i >= hostCount)
                cellHosts[i] = 0;
            else
                cellHosts[i] = kernelMsg[2 + i];
        }

        /* NOTE(review): `code` is only assigned when hostCount != 0; the
         * short-circuit in the next condition is what keeps the
         * uninitialized read from happening — confirm this invariant is
         * preserved if the condition is ever edited. */
        if (hostCount)
            code = afs_NewCell(acellName, cellHosts, CNoSUID, NULL, 0, 0, timeout);

        if (!hostCount || (code && code != EEXIST))
            /* null out the cellname if the lookup failed */
            afsdb_req.cellname = NULL;
        else
            /* If we found an alias, create it */
            if (afs_strcasecmp(afsdb_req.cellname, acellName))
                afs_NewCellAlias(afsdb_req.cellname, acellName);

        /* Request completed, wake up the relevant thread */
        afsdb_req.pending = 0;
        afsdb_req.complete = 1;
        afs_osi_Wakeup(&afsdb_req);
        ConvertWToSLock(&afsdb_req_lock);
    }
    ConvertSToRLock(&afsdb_req_lock);

    /* Wait for a request */
    while (afsdb_req.pending == 0 && afs_termState != AFSOP_STOP_AFSDB) {
        ReleaseReadLock(&afsdb_req_lock);
        afs_osi_Sleep(&afsdb_req);
        ObtainReadLock(&afsdb_req_lock);
    }

    /* Check if we're shutting down */
    if (afs_termState == AFSOP_STOP_AFSDB) {
        ReleaseReadLock(&afsdb_req_lock);

        /* Inform anyone waiting for us that we're going away */
        afsdb_handler_shutdown = 1;
        afsdb_handler_running = 0;
        afs_osi_Wakeup(&afsdb_req);

        afs_termState = AFSOP_STOP_RXEVENT;
        afs_osi_Wakeup(&afs_termState);
        return -2;
    }

    /* Return the lookup request to userspace */
    /* NOTE(review): strncpy does not NUL-terminate when the cellname is
     * >= acellNameLen bytes — presumably the userspace caller sizes the
     * buffer generously; verify against the caller. */
    strncpy(acellName, afsdb_req.cellname, acellNameLen);
    ReleaseReadLock(&afsdb_req_lock);
    return 0;
}
/*!
 * Main loop of a background-request daemon thread.  (The AFS_NEW_BKG
 * alternate signature lives behind an #ifdef whose opening half is above
 * this view; the trailing #endif below closes it.)
 *
 * Picks the pending request with the smallest timestamp from afs_brs,
 * dispatches it by opcode, and sleeps when no work is queued.  Exits when
 * afs_termState reaches AFSOP_STOP_BKG; the last daemon out advances the
 * state machine to AFSOP_STOP_RXCALLBACK.
 */
void
afs_BackgroundDaemon(void)
#endif
{
    struct brequest *tb;
    int i, foundAny;

    AFS_STATCNT(afs_BackgroundDaemon);
    /* initialize subsystem */
    if (brsInit == 0)
        /* Irix with "short stack" exits */
        afs_BackgroundDaemon_once();

#ifdef AFS_NEW_BKG
    /* If it's a re-entering syscall, complete the request and release */
    if (uspc->ts > -1) {
        /* Find the matching outstanding request by timestamp. */
        tb = afs_brs;
        for (i = 0; i < NBRS; i++, tb++) {
            if (tb->ts == uspc->ts) {
                /* copy the userspace status back in */
                ((struct afs_uspc_param *) tb->ptr_parm[0])->retval =
                    uspc->retval;
                /* mark it valid and notify our caller */
                tb->flags |= BUVALID;
                if (tb->flags & BUWAIT) {
                    tb->flags &= ~BUWAIT;
                    afs_osi_Wakeup(tb);
                }
                brequest_release(tb);
                break;
            }
        }
    } else {
        afs_osi_MaskUserLoop();
#endif
        /* Otherwise it's a new one */
        afs_nbrs++;
#ifdef AFS_NEW_BKG
    }
#endif

    ObtainWriteLock(&afs_xbrs, 302);
    while (1) {
        int min_ts = 0;
        struct brequest *min_tb = NULL;

        if (afs_termState == AFSOP_STOP_BKG) {
            /* Last daemon to leave turns off the lights. */
            if (--afs_nbrs <= 0)
                afs_termState = AFSOP_STOP_RXCALLBACK;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Wakeup(&afs_termState);
#ifdef AFS_NEW_BKG
            return -2;
#else
            return;
#endif
        }

        /* find a request */
        tb = afs_brs;
        foundAny = 0;
        for (i = 0; i < NBRS; i++, tb++) {
            /* look for request with smallest ts */
            if ((tb->refCount > 0) && !(tb->flags & BSTARTED)) {
                /* new request, not yet picked up */
                if ((min_tb && (min_ts - tb->ts > 0)) || !min_tb) {
                    min_tb = tb;
                    min_ts = tb->ts;
                }
            }
        }
        if ((tb = min_tb)) {
            /* claim and process this request */
            tb->flags |= BSTARTED;
            ReleaseWriteLock(&afs_xbrs);
            foundAny = 1;
            afs_Trace1(afs_iclSetp, CM_TRACE_BKG1, ICL_TYPE_INT32,
                       tb->opcode);
            if (tb->opcode == BOP_FETCH)
                BPrefetch(tb);
#if defined(AFS_CACHE_BYPASS)
            else if (tb->opcode == BOP_FETCH_NOCACHE)
                BPrefetchNoCache(tb);
#endif
            else if (tb->opcode == BOP_STORE)
                BStore(tb);
            else if (tb->opcode == BOP_PATH)
                BPath(tb);
#ifdef AFS_DARWIN80_ENV
            else if (tb->opcode == BOP_MOVE) {
                /* Hand the move parameters back out to userspace; the
                 * syscall re-enters later to report the result. */
                memcpy(uspc, (struct afs_uspc_param *) tb->ptr_parm[0],
                       sizeof(struct afs_uspc_param));
                uspc->ts = tb->ts;
                /* string lengths capped in move vop; copy NUL tho */
                memcpy(param1, (char *)tb->ptr_parm[1],
                       strlen(tb->ptr_parm[1])+1);
                memcpy(param2, (char *)tb->ptr_parm[2],
                       strlen(tb->ptr_parm[2])+1);
                return 0;
            }
#endif
            else if (tb->opcode == BOP_PARTIAL_STORE)
                BPartialStore(tb);
            else
                panic("background bop");
            brequest_release(tb);
            ObtainWriteLock(&afs_xbrs, 305);
        }
        if (!foundAny) {
            /* wait for new request */
            afs_brsDaemons++;
            ReleaseWriteLock(&afs_xbrs);
            afs_osi_Sleep(&afs_brsDaemons);
            ObtainWriteLock(&afs_xbrs, 307);
            afs_brsDaemons--;
        }
    }
#ifdef AFS_NEW_BKG
    return -2;
#endif
}
/* This function always holds the GLOCK whilst it is running. The caller
 * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
 * whilst we are sleeping, and regains it when we're woken up.
 *
 * Periodic housekeeping daemon: runs a ~20-second tick that flushes
 * callbacks and dirty data, plus staggered 1/3/5/10/60-minute tasks
 * (dir-buffer flush, token check, vcache shakedown, address/volume
 * checks, PAG GC).  Exits when afs_termState hits AFSOP_STOP_AFS.
 */
void
afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck, last5MinCheck;
    afs_uint32 lastCBSlotBump;
    char cs_warned = 0;

    AFS_STATCNT(afs_Daemon);

    afs_rootFid.Fid.Volume = 0;
    /* Wait for basic client initialization to finish. */
    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);

#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
        osi_Panic("vfs context already initialized");
    /* Wait out any transient holders of the temporary context before
     * installing the permanent one. */
    while (afs_osi_ctxtp && vfs_context_ref)
        afs_osi_Sleep(&afs_osi_ctxtp);
    if (afs_osi_ctxtp && !vfs_context_ref)
        vfs_context_rele(afs_osi_ctxtp);
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;
#endif
    now = osi_Time();
    lastCBSlotBump = now;

    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);	/* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);

    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
        afs_CheckCallbacks(20);	/* unstat anything which will expire soon */

        /* things to do every 20 seconds or less - required by protocol spec */
        if (afs_nfsexporter)
            afs_FlushActiveVcaches(0);	/* flush NFS writes */
        afs_FlushVCBs(1);	/* flush queued callbacks */

        afs_MaybeWakeupTruncateDaemon();	/* free cache space if have too */
        rx_CheckPackets();	/* Does RX need more packets? */

        now = osi_Time();
        if (lastCBSlotBump + CBHTSLOTLEN < now) {	/* pretty time-dependant */
            lastCBSlotBump = now;
            if (afs_BumpBase()) {
                afs_CheckCallbacks(20);	/* unstat anything which will expire soon */
            }
        }

        if (last1MinCheck + 60 < now) {
            /* things to do every minute */
            DFlush();		/* write out dir buffers */
            afs_WriteThroughDSlots();	/* write through cacheinfo entries */
            ObtainWriteLock(&afs_xvcache, 736);
            afs_FlushReclaimedVcaches();
            ReleaseWriteLock(&afs_xvcache);
            afs_FlushActiveVcaches(1);	/* keep flocks held & flush nfs writes */
#if 0
            afs_StoreDirtyVcaches();
#endif
            afs_CheckRXEpoch();
            last1MinCheck = now;
        }

        if (last3MinCheck + 180 < now) {
            afs_CheckTokenCache();	/* check for access cache resets due to expired
					 * tickets */
            last3MinCheck = now;
        }

        if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
            /* start with trying to drop us back to our base usage */
            int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);

            if (anumber > 0) {
                ObtainWriteLock(&afs_xvcache, 734);
                afs_ShakeLooseVCaches(anumber);
                ReleaseWriteLock(&afs_xvcache);
            }
            last5MinCheck = now;
        }

        if (!afs_CheckServerDaemonStarted) {
            /* Do the check here if the correct afsd is not installed. */
            if (!cs_warned) {
                cs_warned = 1;
                afs_warn("Please install afsd with check server daemon.\n");
            }
            if (lastNMinCheck + afs_probe_interval < now) {
                /* only check down servers */
                afs_CheckServers(1, NULL);
                lastNMinCheck = now;
            }
        }
        if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
            extern int rxi_GetcbiInfo(void);
#endif
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
            if (rxi_GetcbiInfo()) {	/* addresses changed from last time */
                afs_FlushCBs();
            }
#else /* AFS_USERSPACE_IP_ADDR */
            if (rxi_GetIFInfo()) {	/* addresses changed from last time */
                afs_FlushCBs();
            }
#endif /* else AFS_USERSPACE_IP_ADDR */
            if (!afs_CheckServerDaemonStarted)
                afs_CheckServers(0, NULL);
            afs_GCUserData(0);	/* gc old conns */
            /* This is probably the wrong way of doing GC for the various exporters but
             * it will suffice for a while */
            for (exporter = root_exported; exporter;
                 exporter = exporter->exp_next) {
                (void)EXP_GC(exporter, 0);	/* Generalize params */
            }
            {
                static int cnt = 0;
                /* Full mountpoint revalidation only every 12th pass (~2h). */
                if (++cnt < 12) {
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY);
                } else {
                    cnt = 0;
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY |
                                         AFS_VOLCHECK_MTPTS);
                }
            }
            last10MinCheck = now;
        }
        if (last60MinCheck + 3600 < now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
                       3600);
            afs_CheckRootVolume();
#if AFS_GCPAGS
            if (afs_gcpags == AFS_GCPAGS_OK) {
                afs_int32 didany;
                afs_GCPAGs(&didany);
            }
#endif
            last60MinCheck = now;
        }
        if (afs_initState < 300) {	/* while things ain't rosy */
            code = afs_CheckRootVolume();
            if (code == 0)
                afs_initState = 300;	/* succeeded */
            if (afs_initState < 200)
                afs_initState = 200;	/* tried once */
            afs_osi_Wakeup(&afs_initState);
        }

        /* 18285 is because we're trying to divide evenly into 128, that is,
         * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
         * changes, should probably change this interval, too.
         * Some of the preceding actions may take quite some time, so we
         * might not want to wait the entire interval */
        now = 18285 - (osi_Time() - now);
        if (now > 0) {
            afs_osi_Wait(now, &AFS_WaitHandler, 0);
        }

        if (afs_termState == AFSOP_STOP_AFS) {
            /* Route shutdown through the check-server daemon if running. */
            if (afs_CheckServerDaemonStarted)
                afs_termState = AFSOP_STOP_CS;
            else
                afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            return;
        }
    }
}
/*!
 * Locate the AFS root volume (dynroot, configured name, or its .readonly
 * fallback) and record its fid in afs_rootFid.  If the root volume has
 * moved since the last check, the cached global root vnode is released
 * (on Linux, the dentry alias is re-pointed in place) so it will be
 * re-evaluated.  Returns 0 on success, ENOENT when no root volume or
 * primary cell is available.
 */
int
afs_CheckRootVolume(void)
{
    char rootVolName[32];
    struct volume *tvp = NULL;
    int usingDynroot = afs_GetDynrootEnable();
    int localcell;

    AFS_STATCNT(afs_CheckRootVolume);
    if (*afs_rootVolumeName == 0) {
        strcpy(rootVolName, "root.afs");
    } else {
        strcpy(rootVolName, afs_rootVolumeName);
    }

    if (usingDynroot) {
        /* Dynroot: the root fid is synthesized locally. */
        afs_GetDynrootFid(&afs_rootFid);
        tvp = afs_GetVolume(&afs_rootFid, NULL, READ_LOCK);
    } else {
        struct cell *lc = afs_GetPrimaryCell(READ_LOCK);

        if (!lc)
            return ENOENT;
        localcell = lc->cellNum;
        afs_PutCell(lc, READ_LOCK);
        tvp = afs_GetVolumeByName(rootVolName, localcell, 1, NULL, READ_LOCK);
        if (!tvp) {
            char buf[128];
            int len = strlen(rootVolName);

            /* Fall back to the .readonly clone unless the configured name
             * already ends in ".readonly". */
            if ((len < 9) || strcmp(&rootVolName[len - 9], ".readonly")) {
                strcpy(buf, rootVolName);
                afs_strcat(buf, ".readonly");
                tvp = afs_GetVolumeByName(buf, localcell, 1, NULL, READ_LOCK);
            }
        }
        if (tvp) {
            int volid = (tvp->roVol ? tvp->roVol : tvp->volume);
            afs_rootFid.Cell = localcell;
            if (afs_rootFid.Fid.Volume && afs_rootFid.Fid.Volume != volid
                && afs_globalVp) {
                /* If we had a root fid before and it changed location we reset
                 * the afs_globalVp so that it will be reevaluated.
                 * Just decrement the reference count. This only occurs during
                 * initial cell setup and can panic the machine if we set the
                 * count to zero and fs checkv is executed when the current
                 * directory is /afs.
                 */
#ifdef AFS_LINUX20_ENV
                {
                    struct vrequest *treq = NULL;
                    struct vattr vattr;
                    cred_t *credp;
                    struct dentry *dp;
                    struct vcache *vcp;

                    afs_rootFid.Fid.Volume = volid;
                    afs_rootFid.Fid.Vnode = 1;
                    afs_rootFid.Fid.Unique = 1;

                    credp = crref();
                    if (afs_CreateReq(&treq, credp))
                        goto out;
                    vcp = afs_GetVCache(&afs_rootFid, treq, NULL, NULL);
                    if (!vcp)
                        goto out;
                    afs_getattr(vcp, &vattr, credp);
                    afs_fill_inode(AFSTOV(vcp), &vattr);

                    /* Re-point the existing /afs dentry at the new root
                     * inode instead of dropping it, to avoid invalidating
                     * processes whose cwd is /afs. */
                    dp = d_find_alias(AFSTOV(afs_globalVp));

#if defined(AFS_LINUX24_ENV)
#if defined(HAVE_DCACHE_LOCK)
                    spin_lock(&dcache_lock);
#else
                    spin_lock(&AFSTOV(vcp)->i_lock);
#endif
#if defined(AFS_LINUX26_ENV)
                    spin_lock(&dp->d_lock);
#endif
#endif
#if defined(D_ALIAS_IS_HLIST)
                    hlist_del_init(&dp->d_alias);
                    hlist_add_head(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
#else
                    list_del_init(&dp->d_alias);
                    list_add(&dp->d_alias, &(AFSTOV(vcp)->i_dentry));
#endif
                    dp->d_inode = AFSTOV(vcp);
#if defined(AFS_LINUX24_ENV)
#if defined(AFS_LINUX26_ENV)
                    spin_unlock(&dp->d_lock);
#endif
#if defined(HAVE_DCACHE_LOCK)
                    spin_unlock(&dcache_lock);
#else
                    spin_unlock(&AFSTOV(vcp)->i_lock);
#endif
#endif
                    dput(dp);

                    AFS_FAST_RELE(afs_globalVp);
                    afs_globalVp = vcp;
                out:
                    crfree(credp);
                    afs_DestroyReq(treq);
                }
#else
#ifdef AFS_DARWIN80_ENV
                afs_PutVCache(afs_globalVp);
#else
                AFS_FAST_RELE(afs_globalVp);
#endif
                afs_globalVp = 0;
#endif
            }
            afs_rootFid.Fid.Volume = volid;
            afs_rootFid.Fid.Vnode = 1;
            afs_rootFid.Fid.Unique = 1;
        }
    }
    if (tvp) {
        afs_initState = 300;	/* won */
        afs_osi_Wakeup(&afs_initState);
        afs_PutVolume(tvp, READ_LOCK);
    }
    if (afs_rootFid.Fid.Volume)
        return 0;
    else
        return ENOENT;
}