/*
 * This function returns a free (not in the LRU queue) acl cache entry.
 * It must be called with the cm_aclLock lock held.
 */
static cm_aclent_t *GetFreeACLEnt(cm_scache_t * scp)
{
    cm_aclent_t *aclp;
    cm_scache_t *ascp = 0;

    if (cm_data.aclLRUp == NULL)
        osi_panic("empty aclent LRU", __FILE__, __LINE__);

    if (cm_data.aclLRUEndp == NULL)
        osi_panic("inconsistent aclent LRUEndp == NULL", __FILE__, __LINE__);

    aclp = cm_data.aclLRUEndp;
    osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp,
                  (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q);

    if (aclp->backp && scp != aclp->backp) {
        ascp = aclp->backp;
        lock_ReleaseWrite(&cm_aclLock);
        lock_ObtainWrite(&ascp->rw);
        lock_ObtainWrite(&cm_aclLock);
    }
    CleanupACLEnt(aclp);

    if (ascp)
        lock_ReleaseWrite(&ascp->rw);

    return aclp;
}
static osi_lock_ref_t * lock_GetLockRef(void * lockp, char type)
{
    osi_lock_ref_t * lockRefp = NULL;

    EnterCriticalSection(&lock_ref_CS);
    if (lock_ref_FreeListp) {
        lockRefp = lock_ref_FreeListp;
        osi_QRemoveHT( (osi_queue_t **) &lock_ref_FreeListp,
                       (osi_queue_t **) &lock_ref_FreeListEndp,
                       &lockRefp->q);
    }
    LeaveCriticalSection(&lock_ref_CS);

    if (lockRefp == NULL)
        lockRefp = (osi_lock_ref_t *)malloc(sizeof(osi_lock_ref_t));
    if (lockRefp == NULL)
        osi_panic("lock_GetLockRef: out of memory", __FILE__, __LINE__);

    memset(lockRefp, 0, sizeof(osi_lock_ref_t));
    lockRefp->type = type;
    switch (type) {
    case OSI_LOCK_MUTEX:
        lockRefp->mx = lockp;
        break;
    case OSI_LOCK_RW:
        lockRefp->rw = lockp;
        break;
    default:
        osi_panic("Invalid Lock Type", __FILE__, __LINE__);
    }

    return lockRefp;
}
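/*
 * Illustrative sketch (not part of the original source): lock_GetLockRef
 * supplies the per-thread bookkeeping node used by lock-order validation.
 * The obtain paths record a reference roughly as shown below; the release
 * paths later in this file search the same thread-local queue and hand the
 * node back through lock_FreeLockRef.  The helper name is hypothetical.
 */
static void example_RecordRWLockRef(osi_rwlock_t *lockp)
{
    osi_queue_t *lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
    osi_queue_t *lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);
    osi_lock_ref_t *lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);

    /* push the new reference on the head of this thread's list */
    osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);

    TlsSetValue(tls_LockRefH, lockRefH);
    TlsSetValue(tls_LockRefT, lockRefT);
}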
/* must be called with sleep bucket locked.
 * Frees the structure if it has a 0 reference count (and removes it
 * from the hash bucket).  Otherwise, we simply mark the item
 * for deletion when the ref count hits zero.
 */
void osi_FreeSleepInfo(osi_sleepInfo_t *sp)
{
    LONG_PTR idx;

    if (sp->refCount > 0) {
        TlsSetValue(osi_SleepSlot, NULL);	/* don't reuse me */
        _InterlockedOr(&sp->states, OSI_SLEEPINFO_DELETED);
        return;
    }

    /* remove from hash if still there */
    if (sp->states & OSI_SLEEPINFO_INHASH) {
        idx = osi_SLEEPHASH(sp->value);
        osi_QRemoveHT((osi_queue_t **) &osi_sleepers[idx],
                      (osi_queue_t **) &osi_sleepersEnd[idx], &sp->q);
        _InterlockedAnd(&sp->states, ~OSI_SLEEPINFO_INHASH);
    }

    if (sp->states & OSI_SLEEPINFO_DELETED) {
        EnterCriticalSection(&osi_sleepInfoAllocCS);
        sp->q.nextp = (osi_queue_t *) osi_sleepInfoFreeListp;
        osi_sleepInfoFreeListp = sp;
        _InterlockedAnd(&sp->states, ~OSI_SLEEPINFO_DELETED);
        InterlockedIncrement(&osi_sleepInfoCount);
        LeaveCriticalSection(&osi_sleepInfoAllocCS);
    }
}
/* like TSignal, only wake *everyone* */
void osi_TBroadcast(osi_turnstile_t *turnp)
{
    osi_sleepInfo_t *sp;

    while (sp = turnp->lastp) {
        osi_QRemoveHT((osi_queue_t **) &turnp->firstp,
                      (osi_queue_t **) &turnp->lastp, &sp->q);
        _InterlockedOr(&sp->states, OSI_SLEEPINFO_SIGNALLED);
        ReleaseSemaphore(sp->sema, 1, NULL);
    }	/* while someone's still asleep */
}
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    for (i = 0; i < lockp->readers; i++) {
        if (lockp->tid[i] == tid) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
/*
 * Get an acl cache entry for a particular user and file, or return that it doesn't exist.
 * Called with the scp write locked.
 */
long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 *rightsp)
{
    cm_aclent_t *aclp;
    long retval = -1;
    time_t now = time(NULL);

    lock_AssertWrite(&scp->rw);
    lock_ObtainWrite(&cm_aclLock);
    *rightsp = 0;   /* get a new acl from server if we don't find a
                     * current entry
                     */

    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            if (aclp->tgtLifetime && aclp->tgtLifetime <= now) {
                /* ticket expired */
                osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp,
                              (osi_queue_t **) &cm_data.aclLRUEndp,
                              &aclp->q);
                CleanupACLEnt(aclp);

                /* move to the tail of the LRU queue */
                osi_QAddT((osi_queue_t **) &cm_data.aclLRUp,
                          (osi_queue_t **) &cm_data.aclLRUEndp,
                          &aclp->q);
            } else {
                *rightsp = aclp->randomAccess;
                if (cm_data.aclLRUp != aclp) {
                    /* move to the head of the LRU queue */
                    osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp,
                                  (osi_queue_t **) &cm_data.aclLRUEndp,
                                  &aclp->q);
                    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
                              (osi_queue_t **) &cm_data.aclLRUEndp,
                              &aclp->q);
                }
                retval = 0;     /* success */
            }
            break;
        }
    }

    lock_ReleaseWrite(&cm_aclLock);
    return retval;
}
/* must be called with a critical section held that guards the turnstile
 * structure.  We remove the sleepInfo structure from the queue so we don't
 * wake the guy again, but we don't free it because we're still using the
 * semaphore until the guy waiting wakes up.
 */
void osi_TSignal(osi_turnstile_t *turnp)
{
    osi_sleepInfo_t *sp;

    if (!turnp->lastp)
        return;

    sp = turnp->lastp;
    osi_QRemoveHT((osi_queue_t **) &turnp->firstp,
                  (osi_queue_t **) &turnp->lastp, &sp->q);
    _InterlockedOr(&sp->states, OSI_SLEEPINFO_SIGNALLED);
    ReleaseSemaphore(sp->sema, 1, NULL);
}
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* and finally release the big lock */
    osi_SleepSpin(sleepVal, csp);
}
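/*
 * Illustrative sketch (not part of the original source): osi_SleepR and
 * osi_SleepW implement "atomically release the rwlock and sleep on a value",
 * the condition-wait building block used throughout the cache manager.  The
 * two helpers below are hypothetical; exampleLock and exampleReady are
 * assumed names, the lock is assumed to have been initialized elsewhere, and
 * osi_Wakeup() is assumed to be the counterpart that posts the sleep value.
 */
static osi_rwlock_t exampleLock;        /* hypothetical, init omitted */
static int exampleReady;                /* hypothetical flag guarded by exampleLock */

static void example_WaitUntilReady(void)
{
    lock_ObtainWrite(&exampleLock);
    while (!exampleReady) {
        /* releases exampleLock and sleeps on &exampleReady atomically */
        osi_SleepW((LONG_PTR)&exampleReady, &exampleLock);
        /* the sleep does not reacquire the lock; take it again before re-checking */
        lock_ObtainWrite(&exampleLock);
    }
    lock_ReleaseWrite(&exampleLock);
}

static void example_MarkReady(void)
{
    lock_ObtainWrite(&exampleLock);
    exampleReady = 1;
    lock_ReleaseWrite(&exampleLock);

    /* wake anyone sleeping on the flag's address */
    osi_Wakeup((LONG_PTR)&exampleReady);
}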
/*
 * Add rights to an acl cache entry.  Do the right thing if not present,
 * including digging up an entry from the LRU queue.
 *
 * The scp must be locked when this function is called.
 */
long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 rights)
{
    struct cm_aclent *aclp;
    time_t tgtLifeTime;

    tgtLifeTime = cm_TGTLifeTime(userp, scp->fid.cell);

    lock_ObtainWrite(&cm_aclLock);
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            aclp->randomAccess = rights;
            if (aclp->tgtLifetime < tgtLifeTime)
                aclp->tgtLifetime = tgtLifeTime;
            if (cm_data.aclLRUp != aclp) {
                /* move to the head of the LRU queue */
                osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp,
                              (osi_queue_t **) &cm_data.aclLRUEndp,
                              &aclp->q);
                osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
                          (osi_queue_t **) &cm_data.aclLRUEndp,
                          &aclp->q);
            }
            lock_ReleaseWrite(&cm_aclLock);
            return 0;
        }
    }

    /*
     * Didn't find the dude we're looking for, so take someone from the LRUQ
     * and reuse.  But first try the free list and see if there's already
     * someone there.
     */
    aclp = GetFreeACLEnt(scp);		/* can't fail, panics instead */
    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
              (osi_queue_t **) &cm_data.aclLRUEndp,
              &aclp->q);

    aclp->backp = scp;
    aclp->nextp = scp->randomACLp;
    scp->randomACLp = aclp;
    cm_HoldUser(userp);
    aclp->userp = userp;
    aclp->randomAccess = rights;
    aclp->tgtLifetime = tgtLifeTime;
    lock_ReleaseWrite(&cm_aclLock);

    return 0;
}
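/*
 * Illustrative sketch (not part of the original source): cm_FindACLCache and
 * cm_AddACLCache pair up as a lookup-then-populate cache.  A hypothetical
 * caller that wants the cached rights, falling back to a server fetch on a
 * miss, would look roughly like this.  example_FetchRightsFromServer is an
 * assumed stand-in for the real RPC path.
 */
static long example_FetchRightsFromServer(cm_scache_t *scp, cm_user_t *userp,
                                          afs_uint32 *rightsp);  /* hypothetical */

static long example_GetCachedRights(cm_scache_t *scp, cm_user_t *userp,
                                    afs_uint32 *rightsp)
{
    long code;

    /* scp->rw must be write-locked, as cm_FindACLCache asserts */
    code = cm_FindACLCache(scp, userp, rightsp);
    if (code == 0)
        return 0;                       /* cache hit */

    /* miss or expired ticket: ask the file server, then remember the answer */
    code = example_FetchRightsFromServer(scp, userp, rightsp);
    if (code == 0)
        code = cm_AddACLCache(scp, userp, *rightsp);

    return code;
}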
void cm_BkgDaemon(void * parm)
{
    cm_bkgRequest_t *rp;
    afs_int32 code;
    char name[32] = "";
    long daemonID = (long)parm;

    snprintf(name, sizeof(name), "cm_BkgDaemon_ShutdownEvent%d", daemonID);
    cm_BkgDaemon_ShutdownEvent[daemonID] = thrd_CreateEvent(NULL, FALSE, FALSE, name);
    if ( GetLastError() == ERROR_ALREADY_EXISTS )
        afsi_log("Event Object Already Exists: %s", name);

    rx_StartClientThread();

    lock_ObtainWrite(&cm_daemonLock);
    while (daemon_ShutdownFlag == 0) {
        if (powerStateSuspended) {
            Sleep(1000);
            continue;
        }
        if (!cm_bkgListEndp) {
            osi_SleepW((LONG_PTR)&cm_bkgListp, &cm_daemonLock);
            lock_ObtainWrite(&cm_daemonLock);
            continue;
        }

        /* we found a request */
        for (rp = cm_bkgListEndp; rp; rp = (cm_bkgRequest_t *) osi_QPrev(&rp->q))
        {
            if (cm_ServerAvailable(&rp->scp->fid, rp->userp) &&
                !(rp->scp->flags & CM_SCACHEFLAG_DATASTORING))
                break;
        }

        if (rp == NULL) {
            /* we couldn't find a request that we could process at the current time */
            lock_ReleaseWrite(&cm_daemonLock);
            Sleep(1000);
            lock_ObtainWrite(&cm_daemonLock);
            continue;
        }

        osi_QRemoveHT((osi_queue_t **) &cm_bkgListp, (osi_queue_t **) &cm_bkgListEndp, &rp->q);
        osi_assertx(cm_bkgQueueCount-- > 0, "cm_bkgQueueCount 0");
        lock_ReleaseWrite(&cm_daemonLock);

        osi_Log1(afsd_logp,"cm_BkgDaemon processing request 0x%p", rp);

#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp,"cm_BkgDaemon (before) scp 0x%x ref %d",rp->scp, rp->scp->refCount);
#endif
        code = (*rp->procp)(rp->scp, rp->p1, rp->p2, rp->p3, rp->p4, rp->userp);
#ifdef DEBUG_REFCOUNT
        osi_Log2(afsd_logp,"cm_BkgDaemon (after) scp 0x%x ref %d",rp->scp, rp->scp->refCount);
#endif

        /*
         * Keep the following list synchronized with the
         * error code list in cm_BkgStore.
         * cm_SyncOpDone(CM_SCACHESYNC_ASYNCSTORE) will be called there unless
         * one of these errors has occurred.
         */
        switch ( code ) {
        case CM_ERROR_TIMEDOUT:	/* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            if (rp->procp == cm_BkgStore) {
                osi_Log2(afsd_logp,
                         "cm_BkgDaemon re-queueing failed request 0x%p code 0x%x",
                         rp, code);
                lock_ObtainWrite(&cm_daemonLock);
                cm_bkgQueueCount++;
                osi_QAddT((osi_queue_t **) &cm_bkgListp, (osi_queue_t **)&cm_bkgListEndp, &rp->q);
                break;
            } /* otherwise fall through */
        case 0:  /* success */
        default: /* other error */
            if (code == 0)
                osi_Log1(afsd_logp,"cm_BkgDaemon SUCCESS: request 0x%p", rp);
            else
                osi_Log2(afsd_logp,"cm_BkgDaemon FAILED: request dropped 0x%p code 0x%x", rp, code);
            cm_ReleaseUser(rp->userp);
            cm_ReleaseSCache(rp->scp);
            free(rp);
            lock_ObtainWrite(&cm_daemonLock);
        }
    }
    lock_ReleaseWrite(&cm_daemonLock);
    thrd_SetEvent(cm_BkgDaemon_ShutdownEvent[daemonID]);
}
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }
        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "read lock not held");

    for (i = 0; i < lockp->readers; i++) {
        if (lockp->tid[i] == tid) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    lockp->readers--;

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        osi_assertx(lockp->readers >= 0, "read lock underflow");

        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
/* special turnstile signal for mutexes and locks.  Wakes up only those who
 * will really be able to lock the lock.  The assumption is that everyone who
 * already can use the lock has already been woken (and is thus not in the
 * turnstile any longer).
 *
 * The stillHaveReaders parm is set to 1 if this is a convert from write to read,
 * indicating that there is still at least one reader, and we should only wake
 * up other readers.  We use it in a tricky manner: we just pretend we already woke
 * a reader, and that is sufficient to prevent us from waking a writer.
 *
 * The crit sec. csp is released before the threads are woken, but after they
 * are removed from the turnstile.  It helps ensure that we won't have a spurious
 * context swap back to us if the release performs a context swap for some reason.
 */
void osi_TSignalForMLs(osi_turnstile_t *turnp, int stillHaveReaders, CRITICAL_SECTION *csp)
{
    osi_sleepInfo_t *tsp;		/* a temp */
    osi_sleepInfo_t *nsp;		/* a temp */
    osi_queue_t *wakeupListp;		/* list of dudes to wakeup after dropping lock */
    int wokeReader;
    unsigned short *sp;
    unsigned char *cp;

    wokeReader = stillHaveReaders;
    wakeupListp = NULL;
    while (tsp = turnp->lastp) {
        /* look at each sleepInfo until we find someone we're not supposed to
         * wakeup.
         */
        if (tsp->waitFor & OSI_SLEEPINFO_W4WRITE) {
            if (wokeReader)
                break;
        }
        else
            wokeReader = 1;

        /* otherwise, we will wake this guy.  For now, remove from this list
         * and move to private one, so we can do the wakeup after releasing
         * the crit sec.
         */
        osi_QRemoveHT((osi_queue_t **) &turnp->firstp,
                      (osi_queue_t **) &turnp->lastp, &tsp->q);

        /* do the patching required for lock obtaining */
        if (tsp->waitFor & OSI_SLEEPINFO_W4WRITE) {
            cp = (void *) tsp->value;
            (*cp) |= OSI_LOCKFLAG_EXCL;
            tsp->tidp[0] = tsp->tid;
        }
        else if (tsp->waitFor & OSI_SLEEPINFO_W4READ) {
            sp = (void *) tsp->value;
#ifdef DEBUG
            if ((*sp) < OSI_RWLOCK_THREADS)
                tsp->tidp[*sp] = tsp->tid;
#endif
            (*sp)++;
        }

        /* and add to our own list */
        tsp->q.nextp = wakeupListp;
        wakeupListp = &tsp->q;

        /* now if we woke a writer, we're done, since it is pointless
         * to wake more than one writer.
         */
        if (!wokeReader)
            break;
    }	/* hit end, or found someone we're not supposed to wakeup */

    if (csp)
        LeaveCriticalSection(csp);

    /* finally, wakeup everyone we found.  Don't free things since the sleeper
     * will free the sleepInfo structure.
     */
    for (tsp = (osi_sleepInfo_t *) wakeupListp; tsp; tsp = nsp) {
        /* pull this out first, since *tsp *could* get freed immediately
         * after the ReleaseSemaphore, if a context swap occurs.
         */
        nsp = (osi_sleepInfo_t *) tsp->q.nextp;
        _InterlockedOr(&tsp->states, OSI_SLEEPINFO_SIGNALLED);
        ReleaseSemaphore(tsp->sema, 1, NULL);
    }
}
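/*
 * Illustrative sketch (not part of the original source): the stillHaveReaders
 * argument matters when a write lock is downgraded to a read lock.  The holder
 * keeps a read reference, so only waiting readers may be woken.  An abridged,
 * hypothetical downgrade path (lock-order bookkeeping omitted) would end
 * roughly like this:
 */
static void example_DowngradeWriteToRead(osi_rwlock_t *lockp)
{
    CRITICAL_SECTION *csp = &osi_baseAtomicCS[lockp->atomicIndex];

    EnterCriticalSection(csp);
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;     /* no longer exclusive ... */
    lockp->readers = 1;                     /* ... but we remain as a reader */
    lockp->tid[0] = thrd_Current();

    if (lockp->waiters) {
        /* "1" pretends a reader was already woken, so only readers are woken */
        osi_TSignalForMLs(&lockp->d.turn, 1, csp);
    } else {
        LeaveCriticalSection(csp);
    }
}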
void * cm_BkgDaemon(void * vparm)
{
    cm_bkgRequest_t *rp;
    afs_int32 code;
    char name[32] = "";
    long daemonID = (long)(LONG_PTR)vparm;

    snprintf(name, sizeof(name), "cm_BkgDaemon_ShutdownEvent%u", daemonID);
    cm_BkgDaemon_ShutdownEvent[daemonID] = thrd_CreateEvent(NULL, FALSE, FALSE, name);
    if ( GetLastError() == ERROR_ALREADY_EXISTS )
        afsi_log("Event Object Already Exists: %s", name);

    rx_StartClientThread();

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    while (daemon_ShutdownFlag == 0) {
        int willBlock = 0;

        if (powerStateSuspended) {
            Sleep(1000);
            continue;
        }
        if (!cm_bkgListEndpp[daemonID]) {
            osi_SleepW((LONG_PTR)&cm_bkgListpp[daemonID], &cm_daemonLockp[daemonID]);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
            continue;
        }

        /* we found a request */
        for (rp = cm_bkgListEndpp[daemonID]; rp; rp = (cm_bkgRequest_t *) osi_QPrev(&rp->q))
        {
            if (rp->scp->flags & CM_SCACHEFLAG_DELETED)
                break;

            /*
             * If the request has active I/O such that this worker would
             * be forced to block, leave the request in the queue and move
             * on to one that might be available for servicing.
             */
            if (cm_RequestWillBlock(rp)) {
                willBlock++;
                continue;
            }

            if (cm_ServerAvailable(&rp->scp->fid, rp->userp))
                break;
        }

        if (rp == NULL) {
            /*
             * Couldn't find a request that we could process at the
             * current time.  If there were requests that would cause
             * the worker to block, sleep for 100ms so it can promptly
             * respond when it is available.  Otherwise, sleep for 1s.
             *
             * This polling cycle needs to be replaced with a proper
             * producer/consumer dynamic worker pool.
             */
            osi_Log2(afsd_logp,"cm_BkgDaemon[%u] sleeping %dms all tasks would block",
                     daemonID, willBlock ? 100 : 1000);

            lock_ReleaseWrite(&cm_daemonLockp[daemonID]);
            Sleep(willBlock ? 100 : 1000);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
            continue;
        }

        osi_QRemoveHT((osi_queue_t **) &cm_bkgListpp[daemonID],
                      (osi_queue_t **) &cm_bkgListEndpp[daemonID], &rp->q);
        osi_assertx(cm_bkgQueueCountp[daemonID]-- > 0, "cm_bkgQueueCount 0");
        lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

        osi_Log2(afsd_logp,"cm_BkgDaemon[%u] processing request 0x%p", daemonID, rp);

        if (rp->scp->flags & CM_SCACHEFLAG_DELETED) {
            osi_Log2(afsd_logp,"cm_BkgDaemon[%u] DELETED scp 0x%x", daemonID, rp->scp);
            code = CM_ERROR_BADFD;
        } else {
#ifdef DEBUG_REFCOUNT
            osi_Log3(afsd_logp,"cm_BkgDaemon[%u] (before) scp 0x%x ref %d", daemonID, rp->scp, rp->scp->refCount);
#endif
            code = (*rp->procp)(rp->scp, rp->p1, rp->p2, rp->p3, rp->p4, rp->userp, &rp->req);
#ifdef DEBUG_REFCOUNT
            osi_Log3(afsd_logp,"cm_BkgDaemon[%u] (after) scp 0x%x ref %d", daemonID, rp->scp, rp->scp->refCount);
#endif
        }

        /*
         * Keep the following list synchronized with the
         * error code list in cm_BkgStore.
         * cm_SyncOpDone(CM_SCACHESYNC_ASYNCSTORE) will be called there unless
         * one of these errors has occurred.
         */
        switch ( code ) {
        case CM_ERROR_TIMEDOUT: /* or server restarting */
        case CM_ERROR_RETRY:
        case CM_ERROR_WOULDBLOCK:
        case CM_ERROR_ALLBUSY:
        case CM_ERROR_ALLDOWN:
        case CM_ERROR_ALLOFFLINE:
        case CM_ERROR_PARTIALWRITE:
            if (rp->procp == cm_BkgStore ||
                rp->procp == RDR_BkgFetch) {
                osi_Log3(afsd_logp,
                         "cm_BkgDaemon[%u] re-queueing failed request 0x%p code 0x%x",
                         daemonID, rp, code);
                lock_ObtainWrite(&cm_daemonLockp[daemonID]);
                cm_bkgQueueCountp[daemonID]++;
                osi_QAddT((osi_queue_t **) &cm_bkgListpp[daemonID],
                          (osi_queue_t **)&cm_bkgListEndpp[daemonID], &rp->q);
                break;
            } /* otherwise fall through */
        case 0:  /* success */
        default: /* other error */
            if (code == 0) {
                osi_Log2(afsd_logp,"cm_BkgDaemon[%u] SUCCESS: request 0x%p", daemonID, rp);
            } else {
                osi_Log3(afsd_logp,"cm_BkgDaemon[%u] FAILED: request dropped 0x%p code 0x%x",
                         daemonID, rp, code);
            }
            cm_ReleaseUser(rp->userp);
            cm_ReleaseSCache(rp->scp);
            free(rp);
            lock_ObtainWrite(&cm_daemonLockp[daemonID]);
        }
    }
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);
    thrd_SetEvent(cm_BkgDaemon_ShutdownEvent[daemonID]);
    pthread_exit(NULL);
    return NULL;
}
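/*
 * Illustrative sketch (not part of the original source): the producer side of
 * this daemon links a request onto the head of the per-daemon queue and wakes
 * the worker sleeping on that queue's address.  An abridged, hypothetical
 * enqueue path (duplicate detection and daemon selection omitted); the helper
 * name and the use of osi_Wakeup here are assumptions:
 */
static void example_QueueBkgRequest(cm_bkgRequest_t *rp, long daemonID)
{
    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    cm_bkgQueueCountp[daemonID]++;
    osi_QAddH((osi_queue_t **) &cm_bkgListpp[daemonID],
              (osi_queue_t **) &cm_bkgListEndpp[daemonID],
              &rp->q);
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

    /* matches the sleep value used by cm_BkgDaemon's osi_SleepW above */
    osi_Wakeup((LONG_PTR) &cm_bkgListpp[daemonID]);
}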