/*
 * Add rights to an acl cache entry.  Do the right thing if not present,
 * including digging up an entry from the LRU queue.
 *
 * The scp must be locked when this function is called.
 *
 * scp    - scache entry whose ACL list is being updated
 * userp  - user whose rights are being cached (a reference is held for
 *          the lifetime of the cache entry)
 * rights - access bits to record
 *
 * Always returns 0; GetFreeACLEnt() panics rather than fail.
 */
long cm_AddACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 rights)
{
    struct cm_aclent *aclp;

    lock_ObtainWrite(&cm_aclLock);

    /* If this user already has an entry on this scp, just refresh it. */
    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            aclp->randomAccess = rights;
            /* BUGFIX: was cm_TGTLifeTime(pag) — 'pag' is not declared in
             * this scope.  The lifetime comes from the user whose tokens
             * produced these rights, matching the fresh-entry path below. */
            if (aclp->tgtLifetime == 0)
                aclp->tgtLifetime = cm_TGTLifeTime(userp);
            lock_ReleaseWrite(&cm_aclLock);
            return 0;
        }
    }

    /*
     * Didn't find the dude we're looking for, so take someone from the LRUQ
     * and reuse. But first try the free list and see if there's already
     * someone there.
     */
    aclp = GetFreeACLEnt(scp);          /* can't fail, panics instead */

    /* Newly (re)used entry goes to the head of the LRU queue. */
    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
              (osi_queue_t **) &cm_data.aclLRUEndp,
              &aclp->q);

    /* Thread the entry onto the scp's per-vnode ACL list. */
    aclp->backp = scp;
    aclp->nextp = scp->randomACLp;
    scp->randomACLp = aclp;

    cm_HoldUser(userp);                 /* entry keeps its own user reference */
    aclp->userp = userp;
    aclp->randomAccess = rights;
    aclp->tgtLifetime = cm_TGTLifeTime(userp);

    lock_ReleaseWrite(&cm_aclLock);
    return 0;
}
/*
 * Obtain a read/write lock in write (exclusive) mode, blocking if needed.
 *
 * Non-base lock types are forwarded to the registered lock-ops vector.
 * For the fast base type, the per-lock atomic critical section guards the
 * lock word; if the lock is busy we sleep on its turnstile via osi_TWait,
 * which releases and re-acquires the critical section around the wait.
 * When lockOrderValidation is on, the acquisition is checked against and
 * then recorded in this thread's TLS-held lock-reference list.
 */
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    /* Non-zero type: delegate to the specialized lock implementation. */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* Verify lock-hierarchy ordering before attempting acquisition. */
        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    /* Deadlock debugging: this thread must not already hold this lock
     * as writer or as one of the tracked readers. */
    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for ( i=0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++ ) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        /* Busy: join the turnstile; osi_TWait drops csp while sleeping
         * and re-acquires it before returning.  The waker hands us the
         * lock, so on return the EXCL flag must already be ours. */
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    }
    osi_assertx(lockp->readers == 0, "write lock readers present");
    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* Record the newly held lock in this thread's TLS lock-ref list. */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
/*
 * Queue a background request for one of the daemon worker threads.
 *
 * Holds references on both scp and userp for the lifetime of the queued
 * request.  Requests are hashed by FID onto daemon queue pairs: even IDs
 * carry fetches, odd IDs carry stores, so stores cannot starve fetches
 * for the same file.  If an identical request is already queued, this one
 * is discarded and the references are released.
 *
 * NOTE(review): the malloc() result is used unchecked; on allocation
 * failure the memset below would fault.
 */
void cm_QueueBKGRequest(cm_scache_t *scp, cm_bkgProc_t *procp, afs_uint32 p1, afs_uint32 p2,
                        afs_uint32 p3, afs_uint32 p4, cm_user_t *userp, cm_req_t *reqp)
{
    cm_bkgRequest_t *rp, *rpq;
    afs_uint32 daemonID;
    int duplicate = 0;

    rp = malloc(sizeof(*rp));
    memset(rp, 0, sizeof(*rp));

    /* The request owns its own references to the scache and user objects. */
    cm_HoldSCache(scp);
    rp->scp = scp;
    cm_HoldUser(userp);
    rp->userp = userp;
    rp->procp = procp;
    rp->p1 = p1;
    rp->p2 = p2;
    rp->p3 = p3;
    rp->p4 = p4;
    rp->req = *reqp;

    /* Use separate queues for fetch and store operations */
    daemonID = scp->fid.hash % (cm_nDaemons/2) * 2;
    if (procp == cm_BkgStore)
        daemonID++;

    lock_ObtainWrite(&cm_daemonLockp[daemonID]);
    /* Check to see if this is a duplicate request */
    for (rpq = cm_bkgListpp[daemonID]; rpq; rpq = (cm_bkgRequest_t *) osi_QNext(&rpq->q)) {
        if ( rpq->p1 == p1 &&
             rpq->p3 == p3 &&
             rpq->procp == procp &&
             rpq->p2 == p2 &&
             rpq->p4 == p4 &&
             rpq->scp == scp &&
             rpq->userp == userp)
        {
            /* found a duplicate; this request will be dropped below */
            duplicate = 1;
            break;
        }
    }

    if (!duplicate) {
        /* New work: count it and push onto the head of the daemon's list. */
        cm_bkgQueueCountp[daemonID]++;
        osi_QAddH((osi_queue_t **) &cm_bkgListpp[daemonID],
                  (osi_queue_t **)&cm_bkgListEndpp[daemonID], &rp->q);
    }
    lock_ReleaseWrite(&cm_daemonLockp[daemonID]);

    if (duplicate) {
        /* Drop our references and discard the redundant request. */
        cm_ReleaseSCache(scp);
        cm_ReleaseUser(userp);
        free(rp);
    } else {
        /* Wake the daemon sleeping on this queue's list-head address. */
        osi_Wakeup((LONG_PTR) &cm_bkgListpp[daemonID]);
    }
}
/*
 * Return a lock reference structure to the global free list so it can be
 * recycled by a later lock acquisition.
 */
static void lock_FreeLockRef(osi_lock_ref_t * lockRefp)
{
    CRITICAL_SECTION *csp = &lock_ref_CS;

    EnterCriticalSection(csp);
    osi_QAddH((osi_queue_t **) &lock_ref_FreeListp,
              (osi_queue_t **) &lock_ref_FreeListEndp,
              &lockRefp->q);
    LeaveCriticalSection(csp);
}
/*
 * Try to obtain a read/write lock in write mode without blocking.
 *
 * Returns 1 if the write lock was acquired, 0 if the lock is currently
 * held (by a writer or any readers) or has waiters.  Non-base lock types
 * are forwarded to the registered lock-ops vector.  On success with
 * lockOrderValidation enabled, the acquisition is recorded in the
 * thread's TLS-held lock-reference list.
 */
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* Non-zero type: delegate to the specialized implementation. */
    if ((i=lockp->type) != 0)
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            /* Deadlock debugging: this thread must not already hold this
             * rwlock in any mode. */
            for (lockRefp = (osi_lock_ref_t *)lockRefH ; lockRefp; lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        /* Busy or contended: report failure without blocking. */
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        /* Record the newly held lock in this thread's TLS lock-ref list. */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
/*
 * Sleep on a turnstile until signalled.
 *
 * Must be called with *releasep held; the sleep info is queued on the
 * turnstile BEFORE releasep is dropped, so a waker scanning the queue
 * cannot miss us.  releasep is re-acquired before returning, since the
 * caller typically still needs it.
 *
 * turnp    - turnstile to sleep on
 * waitFor  - OSI_SLEEPINFO_* reason code, consumed by the waker
 * patchp   - address the waker may inspect/patch (e.g. lock flags word)
 * tidp     - thread-id slot(s) the waker may update on hand-off
 * releasep - critical section to drop while asleep and retake after
 * prepend  - non-zero to queue at the head of the turnstile, else tail
 */
void osi_TWaitExt(osi_turnstile_t *turnp, int waitFor, void *patchp, DWORD *tidp, CRITICAL_SECTION *releasep, int prepend)
{
    osi_sleepInfo_t *sp;
    unsigned int code;

    /* Reuse this thread's cached sleep info, allocating on first use;
     * otherwise clear any stale state bits from the previous sleep. */
    sp = TlsGetValue(osi_SleepSlot);
    if (sp == NULL) {
        sp = osi_AllocSleepInfo();
        TlsSetValue(osi_SleepSlot, sp);
    }
    else {
        _InterlockedAnd(&sp->states, 0);
    }
    sp->waitFor = waitFor;
    sp->value = (LONG_PTR) patchp;
    sp->tidp = tidp;
    sp->idx = -1;
    if (prepend)
        osi_QAddH((osi_queue_t **) &turnp->firstp, (osi_queue_t **) &turnp->lastp, &sp->q);
    else
        osi_QAddT((osi_queue_t **) &turnp->firstp, (osi_queue_t **) &turnp->lastp, &sp->q);
    LeaveCriticalSection(releasep);

    /* now wait for the signal */
    while(1) {
        /* wait */
        code = WaitForSingleObject(sp->sema, /* timeout */ INFINITE);

        /* if the reason for the wakeup was that we were signalled,
         * break out, otherwise try again, since the semaphore count is
         * decreased only when we get WAIT_OBJECT_0 back.
         */
        if (code == WAIT_OBJECT_0) break;
    }	/* while we're waiting */

    /* we're the only one who should be looking at or changing this
     * structure after it gets signalled.  Sema sp->sema isn't signalled
     * any longer after we're back from WaitForSingleObject, so we can
     * free this element directly.
     */
    osi_assert(sp->states & OSI_SLEEPINFO_SIGNALLED);

    osi_FreeSleepInfo(sp);

    /* reobtain, since caller commonly needs it */
    EnterCriticalSection(releasep);
}
/*
 * Obtain a mutex, blocking if another thread holds it.
 *
 * Non-base lock types are forwarded to the registered lock-ops vector.
 * For the fast base type, the per-lock atomic critical section guards the
 * mutex state; if held, we sleep on the turnstile via osi_TWait, which
 * releases and re-acquires the critical section around the wait.  When
 * lockOrderValidation is on, the acquisition is checked against and then
 * recorded in this thread's TLS-held lock-reference list.
 */
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    /* Non-zero type: delegate to the specialized implementation. */
    if ((i=lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        /* Verify lock-hierarchy ordering before attempting acquisition. */
        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        /* Busy: join the turnstile; osi_TWait drops csp while sleeping
         * and re-acquires it before returning.  The waker hands us the
         * mutex, so on return the EXCL flag must already be ours. */
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        /* Record the newly held mutex in this thread's TLS lock-ref list. */
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
/*
 * Get an acl cache entry for a particular user and file, or return that it doesn't exist.
 * Called with the scp write locked.
 *
 * On a hit, *rightsp receives the cached access bits, the entry is moved
 * to the head of the LRU queue, and 0 is returned.  If the entry exists
 * but its backing ticket has expired, the entry is cleaned up, parked at
 * the LRU tail for reuse, and -1 is returned (caller must refetch from
 * the server).  Returns -1 on a plain miss as well.
 */
long cm_FindACLCache(cm_scache_t *scp, cm_user_t *userp, afs_uint32 *rightsp)
{
    cm_aclent_t *aclp;
    long retval = -1;
    time_t now = time(NULL);

    lock_AssertWrite(&scp->rw);
    lock_ObtainWrite(&cm_aclLock);
    *rightsp = 0;   /* get a new acl from server if we don't find a
                     * current entry
                     */

    for (aclp = scp->randomACLp; aclp; aclp = aclp->nextp) {
        if (aclp->userp == userp) {
            if (aclp->tgtLifetime && aclp->tgtLifetime <= now) {
                /* ticket expired */
                osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp,
                              (osi_queue_t **) &cm_data.aclLRUEndp,
                              &aclp->q);
                /* Drops the user reference and unthreads from the scp. */
                CleanupACLEnt(aclp);

                /* move to the tail of the LRU queue */
                osi_QAddT((osi_queue_t **) &cm_data.aclLRUp,
                          (osi_queue_t **) &cm_data.aclLRUEndp,
                          &aclp->q);
            } else {
                *rightsp = aclp->randomAccess;
                if (cm_data.aclLRUp != aclp) {
                    /* move to the head of the LRU queue */
                    osi_QRemoveHT((osi_queue_t **) &cm_data.aclLRUp,
                                  (osi_queue_t **) &cm_data.aclLRUEndp,
                                  &aclp->q);
                    osi_QAddH((osi_queue_t **) &cm_data.aclLRUp,
                              (osi_queue_t **) &cm_data.aclLRUEndp,
                              &aclp->q);
                }
                retval = 0;     /* success */
            }
            break;
        }
    }

    lock_ReleaseWrite(&cm_aclLock);
    return retval;
}
/* * Initialize the cache to have an entries. Called during system startup. */ long cm_InitACLCache(int newFile, long size) { cm_aclent_t *aclp; long i; static osi_once_t once; if (osi_Once(&once)) { lock_InitializeRWLock(&cm_aclLock, "cm_aclLock", LOCK_HIERARCHY_ACL_GLOBAL); osi_EndOnce(&once); } lock_ObtainWrite(&cm_aclLock); if ( newFile ) { cm_data.aclLRUp = cm_data.aclLRUEndp = NULL; aclp = (cm_aclent_t *) cm_data.aclBaseAddress; memset(aclp, 0, size * sizeof(cm_aclent_t)); /* * Put all of these guys on the LRU queue */ for (i = 0; i < size; i++) { aclp->magic = CM_ACLENT_MAGIC; osi_QAddH((osi_queue_t **) &cm_data.aclLRUp, (osi_queue_t **) &cm_data.aclLRUEndp, &aclp->q); aclp++; } } else { aclp = (cm_aclent_t *) cm_data.aclBaseAddress; for (i = 0; i < size; i++) { aclp->userp = NULL; aclp->tgtLifetime = 0; aclp++; } } lock_ReleaseWrite(&cm_aclLock); return 0; }