void afs_CheckServerDaemon(void)
{
    afs_int32 now, delay, lastCheck, last10MinCheck;

    afs_CheckServerDaemonStarted = 1;

    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);
    afs_osi_Wait(PROBE_WAIT(), &AFS_CSWaitHandler, 0);

    last10MinCheck = lastCheck = osi_Time();
    while (1) {
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        now = osi_Time();
        if (afs_probe_interval + lastCheck <= now) {
            afs_CheckServers(1, NULL);  /* check down servers */
            lastCheck = now = osi_Time();
        }

        if (afs_probe_all_interval + last10MinCheck <= now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32,
                       afs_probe_all_interval);
            afs_CheckServers(0, NULL);
            last10MinCheck = now = osi_Time();
        }

        /* shutdown check. */
        if (afs_termState == AFSOP_STOP_CS) {
            afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            break;
        }

        /* Compute time to next probe. */
        delay = afs_probe_interval + lastCheck;
        if (delay > afs_probe_all_interval + last10MinCheck)
            delay = afs_probe_all_interval + last10MinCheck;
        delay -= now;
        if (delay < 1)
            delay = 1;
        afs_osi_Wait(delay * 1000, &AFS_CSWaitHandler, 0);
    }
    afs_CheckServerDaemonStarted = 0;
}
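/*
 * Illustrative sketch (not part of the original source): the delay logic
 * above picks whichever of the two probe deadlines comes first and clamps
 * the wait to at least one second.  A hypothetical helper expressing the
 * same arithmetic might look like this; the name next_probe_delay and its
 * parameters are assumptions made purely for illustration.
 */
static afs_int32
next_probe_delay(afs_int32 now, afs_int32 lastCheck, afs_int32 last10MinCheck)
{
    afs_int32 delay = afs_probe_interval + lastCheck;   /* next "down servers" probe */

    if (delay > afs_probe_all_interval + last10MinCheck)
        delay = afs_probe_all_interval + last10MinCheck;    /* next "all servers" probe */
    delay -= now;
    if (delay < 1)
        delay = 1;      /* never busy-wait; sleep at least one second */
    return delay;       /* seconds; the caller multiplies by 1000 for afs_osi_Wait */
}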
void afs_Daemon(void)
{
    afs_int32 now, last10MinCheck, last60MinCheck;

    last10MinCheck = 0;
    last60MinCheck = 0;
    while (1) {
        rx_CheckPackets();
        now = osi_Time();
        if (last10MinCheck + 600 < now) {
            afs_GCUserData();
            last10MinCheck = now;   /* record the run so this fires every 10 minutes, not every pass */
        }
        if (last60MinCheck + 3600 < now) {
            afs_int32 didany;
            afs_GCPAGs(&didany);
            last60MinCheck = now;   /* record the run so this fires every hour, not every pass */
        }

        now = 20000 - (osi_Time() - now);
        if (now > 0)    /* guard against a negative wait when the work ran long */
            afs_osi_Wait(now, &AFS_WaitHandler, 0);

        if (afs_termState == AFSOP_STOP_AFS) {
#if defined(RXK_LISTENER_ENV)
            afs_termState = AFSOP_STOP_RXEVENT;
#else
            afs_termState = AFSOP_STOP_COMPLETE;
#endif
            afs_osi_Wakeup(&afs_termState);
            return;
        }
    }
}
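/*
 * Illustrative sketch (not part of the original source): every periodic job
 * in the daemon loop above follows the same "due when the last run is more
 * than N seconds old" test.  The helper name check_due is an assumption
 * made for illustration only.
 */
static int
check_due(afs_int32 last_run, afs_int32 interval, afs_int32 now)
{
    /* e.g. interval 600 for the 10-minute work, 3600 for the hourly work */
    return (last_run + interval < now);
}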
/* dispatch a no-cache read request */
afs_int32
afs_ReadNoCache(struct vcache *avc, struct nocache_read_request *bparms,
                afs_ucred_t *acred)
{
    afs_int32 code;
    afs_int32 bcnt;
    struct brequest *breq;
    struct vrequest *areq;

    /* the receiver will free this */
    areq = osi_Alloc(sizeof(struct vrequest));

    if (avc->vc_error) {
        code = EIO;
        afs_warn("afs_ReadNoCache VCache Error!\n");
        goto cleanup;
    }
    if ((code = afs_InitReq(areq, acred))) {
        afs_warn("afs_ReadNoCache afs_InitReq error!\n");
        goto cleanup;
    }

    AFS_GLOCK();
    code = afs_VerifyVCache(avc, areq);
    AFS_GUNLOCK();

    if (code) {
        code = afs_CheckCode(code, areq, 11);   /* failed to get it */
        afs_warn("afs_ReadNoCache Failed to verify VCache!\n");
        goto cleanup;
    }

    bparms->areq = areq;

    /* and queue this one */
    bcnt = 1;
    AFS_GLOCK();
    while (bcnt < 20) {
        breq = afs_BQueue(BOP_FETCH_NOCACHE, avc, B_DONTWAIT, 0, acred, 1, 1,
                          bparms, (void *)0, (void *)0);
        if (breq != 0) {
            code = 0;
            break;
        }
        afs_osi_Wait(10 * bcnt, 0, 0);
        bcnt++; /* back off a little longer each retry; without this the loop
                 * never reaches the bcnt < 20 limit and the EBUSY path below
                 * is unreachable */
    }
    AFS_GUNLOCK();

    if (!breq) {
        code = EBUSY;
        goto cleanup;
    }

    return code;

  cleanup:
    /* If there's a problem before we queue the request, we need to
     * do everything that would normally happen when the request was
     * processed, like unlocking the pages and freeing memory. */
    unlock_and_release_pages(bparms->auio);
    osi_Free(areq, sizeof(struct vrequest));
    osi_Free(bparms->auio->uio_iov,
             bparms->auio->uio_iovcnt * sizeof(struct iovec));
    osi_Free(bparms->auio, sizeof(struct uio));
    osi_Free(bparms, sizeof(struct nocache_read_request));
    return code;
}
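/*
 * Ownership note (descriptive comment, not part of the original source):
 * once afs_BQueue succeeds, the background request's receiver owns areq,
 * bparms, bparms->auio, and its iovec array, and is responsible for
 * unlocking the pages and freeing that memory when the fetch completes.
 * Only when the request fails before it is queued does the cleanup label
 * above perform that same teardown in the caller's context.
 */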
/* This function always holds the GLOCK whilst it is running. The caller
 * gets the GLOCK before invoking it, and afs_osi_Sleep drops the GLOCK
 * whilst we are sleeping, and regains it when we're woken up.
 */
void afs_Daemon(void)
{
    afs_int32 code;
    struct afs_exporter *exporter;
    afs_int32 now;
    afs_int32 last3MinCheck, last10MinCheck, last60MinCheck, lastNMinCheck;
    afs_int32 last1MinCheck, last5MinCheck;
    afs_uint32 lastCBSlotBump;
    char cs_warned = 0;

    AFS_STATCNT(afs_Daemon);

    afs_rootFid.Fid.Volume = 0;
    while (afs_initState < 101)
        afs_osi_Sleep(&afs_initState);

#ifdef AFS_DARWIN80_ENV
    if (afs_osi_ctxtp_initialized)
        osi_Panic("vfs context already initialized");
    while (afs_osi_ctxtp && vfs_context_ref)
        afs_osi_Sleep(&afs_osi_ctxtp);
    if (afs_osi_ctxtp && !vfs_context_ref)
        vfs_context_rele(afs_osi_ctxtp);
    afs_osi_ctxtp = vfs_context_create(NULL);
    afs_osi_ctxtp_initialized = 1;
#endif

    now = osi_Time();
    lastCBSlotBump = now;

    /* when a lot of clients are booted simultaneously, they develop
     * annoying synchronous VL server bashing behaviors.  So we stagger them.
     */
    last1MinCheck = now + ((afs_random() & 0x7fffffff) % 60);   /* an extra 30 */
    last3MinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);
    last60MinCheck = now - 1800 + ((afs_random() & 0x7fffffff) % 3600);
    last10MinCheck = now - 300 + ((afs_random() & 0x7fffffff) % 600);
    last5MinCheck = now - 150 + ((afs_random() & 0x7fffffff) % 300);
    lastNMinCheck = now - 90 + ((afs_random() & 0x7fffffff) % 180);

    /* start off with afs_initState >= 101 (basic init done) */
    while (1) {
        afs_CheckCallbacks(20);     /* unstat anything which will expire soon */

        /* things to do every 20 seconds or less - required by protocol spec */
        if (afs_nfsexporter)
            afs_FlushActiveVcaches(0);      /* flush NFS writes */
        afs_FlushVCBs(1);           /* flush queued callbacks */

        afs_MaybeWakeupTruncateDaemon();    /* free cache space if we have to */
        rx_CheckPackets();          /* Does RX need more packets? */

        now = osi_Time();
        if (lastCBSlotBump + CBHTSLOTLEN < now) {   /* pretty time-dependent */
            lastCBSlotBump = now;
            if (afs_BumpBase()) {
                afs_CheckCallbacks(20);     /* unstat anything which will expire soon */
            }
        }

        if (last1MinCheck + 60 < now) {
            /* things to do every minute */
            DFlush();               /* write out dir buffers */
            afs_WriteThroughDSlots();   /* write through cacheinfo entries */
            ObtainWriteLock(&afs_xvcache, 736);
            afs_FlushReclaimedVcaches();
            ReleaseWriteLock(&afs_xvcache);
            afs_FlushActiveVcaches(1);  /* keep flocks held & flush nfs writes */
#if 0
            afs_StoreDirtyVcaches();
#endif
            afs_CheckRXEpoch();
            last1MinCheck = now;
        }

        if (last3MinCheck + 180 < now) {
            afs_CheckTokenCache();  /* check for access cache resets due to
                                     * expired tickets */
            last3MinCheck = now;
        }

        if (afsd_dynamic_vcaches && (last5MinCheck + 300 < now)) {
            /* start with trying to drop us back to our base usage */
            int anumber = VCACHE_FREE + (afs_vcount - afs_cacheStats);

            if (anumber > 0) {
                ObtainWriteLock(&afs_xvcache, 734);
                afs_ShakeLooseVCaches(anumber);
                ReleaseWriteLock(&afs_xvcache);
            }
            last5MinCheck = now;
        }

        if (!afs_CheckServerDaemonStarted) {
            /* Do the check here if the correct afsd is not installed. */
            if (!cs_warned) {
                cs_warned = 1;
                afs_warn("Please install afsd with check server daemon.\n");
            }
            if (lastNMinCheck + afs_probe_interval < now) {
                /* only check down servers */
                afs_CheckServers(1, NULL);
                lastNMinCheck = now;
            }
        }
        if (last10MinCheck + 600 < now) {
#ifdef AFS_USERSPACE_IP_ADDR
            extern int rxi_GetcbiInfo(void);
#endif
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEUP, ICL_TYPE_INT32, 600);
#ifdef AFS_USERSPACE_IP_ADDR
            if (rxi_GetcbiInfo()) {     /* addresses changed from last time */
                afs_FlushCBs();
            }
#else /* AFS_USERSPACE_IP_ADDR */
            if (rxi_GetIFInfo()) {      /* addresses changed from last time */
                afs_FlushCBs();
            }
#endif /* else AFS_USERSPACE_IP_ADDR */
            if (!afs_CheckServerDaemonStarted)
                afs_CheckServers(0, NULL);
            afs_GCUserData(0);          /* gc old conns */
            /* This is probably the wrong way of doing GC for the various
             * exporters but it will suffice for a while */
            for (exporter = root_exported; exporter;
                 exporter = exporter->exp_next) {
                (void)EXP_GC(exporter, 0);  /* Generalize params */
            }
            {
                static int cnt = 0;
                if (++cnt < 12) {
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY);
                } else {
                    cnt = 0;
                    afs_CheckVolumeNames(AFS_VOLCHECK_EXPIRED |
                                         AFS_VOLCHECK_BUSY |
                                         AFS_VOLCHECK_MTPTS);
                }
            }
            last10MinCheck = now;
        }
        if (last60MinCheck + 3600 < now) {
            afs_Trace1(afs_iclSetp, CM_TRACE_PROBEVOLUME, ICL_TYPE_INT32,
                       3600);
            afs_CheckRootVolume();
#if AFS_GCPAGS
            if (afs_gcpags == AFS_GCPAGS_OK) {
                afs_int32 didany;
                afs_GCPAGs(&didany);
            }
#endif
            last60MinCheck = now;
        }
        if (afs_initState < 300) {      /* while things ain't rosy */
            code = afs_CheckRootVolume();
            if (code == 0)
                afs_initState = 300;    /* succeeded */
            if (afs_initState < 200)
                afs_initState = 200;    /* tried once */
            afs_osi_Wakeup(&afs_initState);
        }

        /* 18285 is because we're trying to divide evenly into 128, that is,
         * CBSlotLen, while staying just under 20 seconds.  If CBSlotLen
         * changes, should probably change this interval, too.
         * Some of the preceding actions may take quite some time, so we
         * might not want to wait the entire interval */
        now = 18285 - (osi_Time() - now);
        if (now > 0) {
            afs_osi_Wait(now, &AFS_WaitHandler, 0);
        }

        if (afs_termState == AFSOP_STOP_AFS) {
            if (afs_CheckServerDaemonStarted)
                afs_termState = AFSOP_STOP_CS;
            else
                afs_termState = AFSOP_STOP_TRUNCDAEMON;
            afs_osi_Wakeup(&afs_termState);
            return;
        }
    }
}
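/*
 * Illustrative sketch (not part of the original source): the initial
 * lastXMinCheck values above are randomized so that a crowd of clients
 * booted at the same time do not all hit the VL servers in lock step.
 * Each timestamp is placed about half a period in the past, plus a random
 * offset of up to a full period, which is the pattern used for the
 * 3/5/10/60 minute checks.  The helper name stagger_last_check is an
 * assumption made for illustration only.
 */
static afs_int32
stagger_last_check(afs_int32 now, afs_int32 period)
{
    /* e.g. period = 600 gives now - 300 + (random % 600), as in the code above */
    return now - period / 2 + ((afs_random() & 0x7fffffff) % period);
}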
/* clid - nonzero on sgi sunos osf1 only */
int HandleFlock(struct vcache *avc, int acom, struct vrequest *areq,
                pid_t clid, int onlymine)
{
    struct afs_conn *tc;
    struct SimpleLocks *slp, *tlp, **slpp;
    afs_int32 code;
    struct AFSVolSync tsync;
    afs_int32 lockType;
    struct AFS_FLOCK flock;

    XSTATS_DECLS;
    AFS_STATCNT(HandleFlock);
    code = 0;                   /* default when we don't make any network calls */
    lockIdSet(&flock, NULL, clid);

#if defined(AFS_SGI_ENV)
    osi_Assert(valusema(&avc->vc_rwlock) <= 0);
    osi_Assert(OSI_GET_LOCKID() == avc->vc_rwlockid);
#endif
    ObtainWriteLock(&avc->lock, 118);
    if (acom & LOCK_UN) {
        int stored_segments = 0;
      retry_unlock:

/* defect 3083 */
#ifdef AFS_AIX_ENV
        /* If the lock is held exclusive, then only the owning process
         * or a child can unlock it. Use pid and ppid because they are
         * unique identifiers. */
        if ((avc->flockCount < 0) && (getpid() != avc->ownslock)) {
#ifdef AFS_AIX41_ENV
            if (onlymine || (getppid() != avc->ownslock)) {
#else
            if (onlymine || (u.u_procp->p_ppid != avc->ownslock)) {
#endif
                ReleaseWriteLock(&avc->lock);
                return 0;
            }
        }
#endif
        if (lockIdcmp2(&flock, avc, NULL, onlymine, clid)) {
            ReleaseWriteLock(&avc->lock);
            return 0;
        }
#ifdef AFS_AIX_ENV
        avc->ownslock = 0;
#endif
        if (avc->flockCount == 0) {
            ReleaseWriteLock(&avc->lock);
            return 0 /*ENOTTY*/;        /* no lock held */
        }
        /* unlock the lock */
        if (avc->flockCount > 0) {
            slpp = &avc->slocks;
            for (slp = *slpp; slp;) {
                if (!lockIdcmp2(&flock, avc, slp, onlymine, clid)) {
                    avc->flockCount--;
                    tlp = *slpp = slp->next;
                    osi_FreeSmallSpace(slp);
                    slp = tlp;
                } else {
                    slpp = &slp->next;
                    slp = *slpp;
                }
            }
        } else if (avc->flockCount == -1) {
            if (!stored_segments) {
                afs_StoreAllSegments(avc, areq, AFS_SYNC | AFS_VMSYNC); /* fsync file early */
                /* afs_StoreAllSegments can drop and reacquire the write lock
                 * on avc and GLOCK, so the flocks may be completely different
                 * now. Go back and perform all checks again. */
                stored_segments = 1;
                goto retry_unlock;
            }
            avc->flockCount = 0;
            /* And remove the (only) exclusive lock entry from the list... */
            osi_FreeSmallSpace(avc->slocks);
            avc->slocks = 0;
        }
        if (avc->flockCount == 0) {
            if (!AFS_IS_DISCONNECTED) {
                struct rx_connection *rxconn;
                do {
                    tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK, &rxconn);
                    if (tc) {
                        XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_RELEASELOCK);
                        RX_AFS_GUNLOCK();
                        code = RXAFS_ReleaseLock(rxconn, (struct AFSFid *)
                                                 &avc->f.fid.Fid, &tsync);
                        RX_AFS_GLOCK();
                        XSTATS_END_TIME;
                    } else
                        code = -1;
                } while (afs_Analyze
                         (tc, rxconn, code, &avc->f.fid, areq,
                          AFS_STATS_FS_RPCIDX_RELEASELOCK, SHARED_LOCK,
                          NULL));
            } else {
                /*printf("Network is dooooooowwwwwwwnnnnnnn\n");*/
                code = ENETDOWN;
            }
        }
    } else {
        while (1) {             /* set a new lock */
            /*
             * Upgrading from shared locks to exclusive and vice versa
             * is a bit tricky and we don't really support it yet. But
             * we try to support the commonly used case, which is upgrading
             * a shared lock to an exclusive one for the same process...
             */
            if ((avc->flockCount > 0 && (acom & LOCK_EX))
                || (avc->flockCount == -1 && (acom & LOCK_SH))) {
                /*
                 * Upgrading from shared locks to an exclusive one:
                 * For now, if all the shared locks belong to the
                 * same process then we unlock them on the server
                 * and proceed with the upgrade.  Unless we change the
                 * server's locking interface implementation we refuse to
                 * unlock other processes' shared locks...
                 * Upgrading from an exclusive lock to a shared one:
                 * Again, only allowed to be done by the same process.
                 */
                slpp = &avc->slocks;
                for (slp = *slpp; slp;) {
                    if (!lockIdcmp2
                        (&flock, avc, slp, 1 /*!onlymine */ , clid)) {
                        if (acom & LOCK_EX)
                            avc->flockCount--;
                        else
                            avc->flockCount = 0;
                        tlp = *slpp = slp->next;
                        osi_FreeSmallSpace(slp);
                        slp = tlp;
                    } else {
                        code = EWOULDBLOCK;
                        slpp = &slp->next;
                        slp = *slpp;
                    }
                }
                if (!code && avc->flockCount == 0) {
                    if (!AFS_IS_DISCONNECTED) {
                        struct rx_connection *rxconn;
                        do {
                            tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK,
                                          &rxconn);
                            if (tc) {
                                XSTATS_START_TIME
                                    (AFS_STATS_FS_RPCIDX_RELEASELOCK);
                                RX_AFS_GUNLOCK();
                                code =
                                    RXAFS_ReleaseLock(rxconn,
                                                      (struct AFSFid *)&avc->
                                                      f.fid.Fid, &tsync);
                                RX_AFS_GLOCK();
                                XSTATS_END_TIME;
                            } else
                                code = -1;
                        } while (afs_Analyze
                                 (tc, rxconn, code, &avc->f.fid, areq,
                                  AFS_STATS_FS_RPCIDX_RELEASELOCK,
                                  SHARED_LOCK, NULL));
                    }
                }
            } else if (avc->flockCount == -1 && (acom & LOCK_EX)) {
                if (lockIdcmp2(&flock, avc, NULL, 1, clid)) {
                    code = EWOULDBLOCK;
                } else {
                    code = 0;
                    /* We've just re-grabbed an exclusive lock, so we don't
                     * need to contact the fileserver, and we don't need to
                     * add the lock to avc->slocks (since we already have a
                     * lock there). So, we are done. */
                    break;
                }
            }
            if (code == 0) {
                /* compatible here, decide if needs to go to file server.  If
                 * we've already got the file locked (and thus read-locked,
                 * since we've already checked for compatibility), we
                 * shouldn't send the call through to the server again */
                if (avc->flockCount == 0) {
                    struct rx_connection *rxconn;
                    /* we're the first on our block, send the call through */
                    lockType = ((acom & LOCK_EX) ? LockWrite : LockRead);
                    if (!AFS_IS_DISCONNECTED) {
                        do {
                            tc = afs_Conn(&avc->f.fid, areq, SHARED_LOCK,
                                          &rxconn);
                            if (tc) {
                                XSTATS_START_TIME(AFS_STATS_FS_RPCIDX_SETLOCK);
                                RX_AFS_GUNLOCK();
                                code = RXAFS_SetLock(rxconn, (struct AFSFid *)
                                                     &avc->f.fid.Fid, lockType,
                                                     &tsync);
                                RX_AFS_GLOCK();
                                XSTATS_END_TIME;
                            } else
                                code = -1;
                        } while (afs_Analyze
                                 (tc, rxconn, code, &avc->f.fid, areq,
                                  AFS_STATS_FS_RPCIDX_SETLOCK, SHARED_LOCK,
                                  NULL));
                        if ((lockType == LockWrite) && (code == VREADONLY))
                            code = EBADF;   /* per POSIX; VREADONLY == EROFS */
                    } else
                        /* XXX - Should probably try and log this when we're
                         * XXX - running with logging enabled.  But it's horrid
                         */
                        code = 0;   /* pretend we worked - ick!!! */
                } else
                    code = 0;       /* otherwise, pretend things worked */
            }
            if (code == 0) {
                slp = (struct SimpleLocks *)
                    osi_AllocSmallSpace(sizeof(struct SimpleLocks));
                if (acom & LOCK_EX) {

/* defect 3083 */
#ifdef AFS_AIX_ENV
                    /* Record unique id of process owning exclusive lock. */
                    avc->ownslock = getpid();
#endif
                    slp->type = LockWrite;
                    slp->next = NULL;
                    avc->slocks = slp;
                    avc->flockCount = -1;
                } else {
                    slp->type = LockRead;
                    slp->next = avc->slocks;
                    avc->slocks = slp;
                    avc->flockCount++;
                }
                lockIdSet(&flock, slp, clid);
                break;
            }
            /* now, if we got EWOULDBLOCK, and we're supposed to wait, we do */
            if (((code == EWOULDBLOCK) || (code == EAGAIN)
                 || (code == UAEWOULDBLOCK) || (code == UAEAGAIN))
                && !(acom & LOCK_NB)) {
                /* sleep for a second, allowing interrupts */
                ReleaseWriteLock(&avc->lock);
#if defined(AFS_SGI_ENV)
                AFS_RWUNLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
                code = afs_osi_Wait(1000, NULL, 1);
#if defined(AFS_SGI_ENV)
                AFS_RWLOCK((vnode_t *) avc, VRWLOCK_WRITE);
#endif
                ObtainWriteLock(&avc->lock, 120);
                if (code) {
                    code = EINTR;   /* return this if ^C typed */
                    break;
                }
            } else
                break;
        }                       /* while loop */
    }
    ReleaseWriteLock(&avc->lock);
    code = afs_CheckCode(code, areq, 1);    /* defeat a buggy AIX optimization */
    return code;
}

/* warn a user that a lock has been ignored */
afs_int32 lastWarnTime = 0;     /* this is used elsewhere */
static afs_int32 lastWarnPid = 0;

static void DoLockWarning(afs_ucred_t * acred)
{
    afs_int32 now;
    pid_t pid = MyPidxx2Pid(MyPidxx);
    char *procname;

    now = osi_Time();

    AFS_STATCNT(DoLockWarning);

    /* check if we've already warned this user recently */
    if (!((now < lastWarnTime + 120) && (lastWarnPid == pid))) {
        procname = afs_osi_Alloc(256);

        if (!procname)
            return;

        /* Copies process name to allocated procname, see osi_machdeps for
         * details of macro */
        osi_procname(procname, 256);
        procname[255] = '\0';

        /* otherwise, it is time to nag the user */
        lastWarnTime = now;
        lastWarnPid = pid;
#ifdef AFS_LINUX26_ENV
        afs_warnuser
            ("afs: byte-range locks only enforced for processes on this machine (pid %d (%s), user %ld).\n",
             pid, procname, (long)afs_cr_uid(acred));
#else
        afs_warnuser
            ("afs: byte-range lock/unlock ignored; make sure no one else is running this program (pid %d (%s), user %ld).\n",
             pid, procname, (long)afs_cr_uid(acred));
#endif
        afs_osi_Free(procname, 256);
    }
    return;
}
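/*
 * Illustrative sketch (not part of the original source): DoLockWarning only
 * nags when the previous warning is more than 120 seconds old or was issued
 * for a different pid.  The predicate below restates that condition using
 * the same file-scope lastWarnTime/lastWarnPid state; the helper name
 * should_warn is an assumption made for illustration only.
 */
static int should_warn(afs_int32 now, pid_t pid)
{
    /* suppress only when the last warning was recent AND for the same pid */
    return !((now < lastWarnTime + 120) && (lastWarnPid == pid));
}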