void lock_ConvertWToR(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertWToRProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    /* convert write lock to read lock */
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->readers++;

    osi_assertx(lockp->readers == 1, "read lock not one");

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, /* still have readers */ 1, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
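/*
 * Illustrative sketch (an assumption, not part of the original source):
 * the usual pattern for lock_ConvertWToR is to mutate shared state under
 * the write lock, downgrade, and keep reading with no release/reacquire
 * window in which another writer could intervene.  The names
 * example_UpdateThenScan, updateCache and scanCache are hypothetical.
 */
#ifdef OSI_LOCK_EXAMPLES
static void example_UpdateThenScan(osi_rwlock_t *lockp)
{
    lock_ObtainWrite(lockp);    /* exclusive access for the update */
    /* updateCache(); */
    lock_ConvertWToR(lockp);    /* atomic downgrade: other readers may join,
                                 * but no writer ran in between */
    /* scanCache(); */
    lock_ReleaseRead(lockp);    /* drop the read hold from the downgrade */
}
#endif /* OSI_LOCK_EXAMPLES */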
void lock_ObtainWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainWriteProc)(lockp);
        return;
    }

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderRW(lockRefH, lockRefT, lockp);
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockp->flags & OSI_LOCKFLAG_EXCL) {
        osi_assertx(lockp->tid[0] != tid, "OSI_RWLOCK_WRITEHELD");
    } else {
        for (i = 0; i < lockp->readers && i < OSI_RWLOCK_THREADS; i++) {
            osi_assertx(lockp->tid[i] != tid, "OSI_RWLOCK_READHELD");
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    }

    osi_assertx(lockp->readers == 0, "write lock readers present");

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
/* add an element to a log */
void osi_LogAdd(osi_log_t *logp, char *formatp, size_t p0, size_t p1, size_t p2,
                size_t p3, size_t p4)
{
    osi_logEntry_t *lep;
    long ix;
    LARGE_INTEGER bigTime;

    /* handle init races */
    if (!logp)
        return;

    /* do this w/o locking for speed; it is obviously harmless if we're off
     * by a bit.
     */
    if (!logp->enabled)
        return;

    thrd_EnterCrit(&logp->cs);
    if (logp->nused < logp->alloc)
        logp->nused++;
    else {
        logp->first++;
        if (logp->first >= logp->alloc)
            logp->first -= logp->alloc;
    }
    ix = logp->first + logp->nused - 1;
    if (ix >= logp->alloc)
        ix -= logp->alloc;

    lep = logp->datap + ix;	/* ptr arith */
    lep->tid = thrd_Current();

    /* get the time, using the high res timer if available */
    if (osi_logFreq) {
        QueryPerformanceCounter(&bigTime);
        lep->micros = (bigTime.LowPart / osi_logFreq) * osi_logTixToMicros;
    }
    else
        lep->micros = GetCurrentTime() * 1000;

    lep->formatp = formatp;
    lep->parms[0] = p0;
    lep->parms[1] = p1;
    lep->parms[2] = p2;
    lep->parms[3] = p3;
    lep->parms[4] = p4;

#ifdef NOTSERVICE
    printf("%9ld:", lep->micros);
    printf(formatp, p0, p1, p2, p3, p4);
    printf("\n");
#endif

    if (ISCLIENTDEBUGLOG(osi_TraceOption)) {
        char wholemsg[1024], msg[1000];

        StringCbPrintfA(msg, sizeof(msg), formatp, p0, p1, p2, p3, p4);
        StringCbPrintfA(wholemsg, sizeof(wholemsg), "tid[%d] %s\n",
                        lep->tid, msg);
        OutputDebugStringA(wholemsg);
    }

    thrd_LeaveCrit(&logp->cs);
}
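/*
 * Illustrative sketch (an assumption, not part of the original source):
 * osi_LogAdd keeps a fixed-size ring buffer, so the index math above wraps
 * first and nused around logp->alloc.  Callers always supply all five
 * parameter slots, zero-filling the unused ones.  The log handle name
 * exampleLogp and the format string here are hypothetical.
 */
#ifdef OSI_LOG_EXAMPLES
static void example_LogAdd(osi_log_t *exampleLogp, long code, char *namep)
{
    /* five parms are always passed; unused slots are 0 */
    osi_LogAdd(exampleLogp, "lookup of %s failed, code %d",
               (size_t) namep, (size_t) code, 0, 0, 0);
}
#endif /* OSI_LOG_EXAMPLES */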
void osi_SleepR(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepRProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "osi_SleepR: not held");

    /* remove this thread's id from the reader list, shifting the rest down */
    for (i = 0; i < lockp->readers; i++) {
        if (lockp->tid[i] == tid) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    /* XXX better to get the list of things to wakeup from TSignalForMLs, and
     * then do the wakeup after SleepSpin releases the low-level mutex.
     */
    if (--(lockp->readers) == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
int lock_TryWrite(struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            return (osi_lockOps[i]->TryWriteProc)(lockp);
        return 0;	/* unknown lock type; report failure instead of
                         * falling through to the base-type path */
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0) {
            for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
                 lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
                if (lockRefp->type == OSI_LOCK_RW) {
                    osi_assertx(lockRefp->rw != lockp, "RW Lock already held");
                }
            }
        }
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL) ||
        (lockp->readers > 0)) {
        i = 0;
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = thrd_Current();
        i = 1;
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation && i) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_RW);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    return i;
}
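/*
 * Illustrative sketch (an assumption, not part of the original source):
 * lock_TryWrite returns nonzero iff the write lock was acquired, so callers
 * need a fallback path when it fails.  The helper name doWorkLocked is
 * hypothetical.
 */
#ifdef OSI_LOCK_EXAMPLES
static int example_TryWrite(struct osi_rwlock *lockp)
{
    if (!lock_TryWrite(lockp))
        return 0;	/* lock busy; caller retries or defers the work */

    /* doWorkLocked(); */
    lock_ReleaseWrite(lockp);
    return 1;
}
#endif /* OSI_LOCK_EXAMPLES */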
void lock_ReleaseWrite(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseWriteProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "write lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "write lock not held");
    osi_assertx(lockp->tid[0] == thrd_Current(), "write lock not held by current thread");

    lockp->tid[0] = 0;
    lockp->flags &= ~OSI_LOCKFLAG_EXCL;

    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}
void lock_ObtainMutex(struct osi_mutex *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ObtainMutexProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        if (lockp->level != 0)
            lock_VerifyOrderMX(lockRefH, lockRefT, lockp);
    }

    /* here we have the fast lock, so see if we can obtain the real lock */
    if (lockp->waiters > 0 || (lockp->flags & OSI_LOCKFLAG_EXCL)) {
        lockp->waiters++;
        osi_TWait(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags, &lockp->tid, csp);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->flags & OSI_LOCKFLAG_EXCL);
    } else {
        /* if we're here, all clear to set the lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid = thrd_Current();
    }

    LeaveCriticalSection(csp);

    if (lockOrderValidation) {
        lockRefp = lock_GetLockRef(lockp, OSI_LOCK_MUTEX);
        osi_QAddH(&lockRefH, &lockRefT, &lockRefp->q);
        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }
}
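/*
 * Illustrative sketch (an assumption, not part of the original source):
 * the basic obtain/release pairing for the fast mutex, using its
 * counterpart lock_ReleaseMutex from elsewhere in this library.  The
 * helper name touchSharedState is hypothetical.
 */
#ifdef OSI_LOCK_EXAMPLES
static void example_Mutex(struct osi_mutex *mxp)
{
    lock_ObtainMutex(mxp);      /* blocks until the mutex is free */
    /* touchSharedState(); */
    lock_ReleaseMutex(mxp);     /* wakes one waiter, if any */
}
#endif /* OSI_LOCK_EXAMPLES */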
void osi_SleepW(LONG_PTR sleepVal, struct osi_rwlock *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->SleepWProc)(sleepVal, lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                break;
            }
        }

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->flags & OSI_LOCKFLAG_EXCL, "osi_SleepW: not held");

    lockp->flags &= ~OSI_LOCKFLAG_EXCL;
    lockp->tid[0] = 0;
    if (lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, NULL);
    }

    /* now call into scheduler to sleep atomically with releasing spin lock */
    osi_SleepSpin(sleepVal, csp);
}
void lock_ConvertRToW(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ConvertRToWProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    osi_assertx(!(lockp->flags & OSI_LOCKFLAG_EXCL), "write lock held");
    osi_assertx(lockp->readers > 0, "read lock not held");

    /* remove this thread's id from the reader list, shifting the rest down */
    for (i = 0; i < lockp->readers; i++) {
        if (lockp->tid[i] == tid) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    if (--(lockp->readers) == 0) {
        /* convert read lock to write lock */
        lockp->flags |= OSI_LOCKFLAG_EXCL;
        lockp->tid[0] = tid;
    } else {
        osi_assertx(lockp->readers > 0, "read lock underflow");

        lockp->waiters++;
        osi_TWaitExt(&lockp->d.turn, OSI_SLEEPINFO_W4WRITE, &lockp->flags,
                     lockp->tid, csp, FALSE);
        lockp->waiters--;
        osi_assertx(lockp->waiters >= 0, "waiters underflow");
        osi_assert(lockp->readers == 0 && (lockp->flags & OSI_LOCKFLAG_EXCL));
    }

    LeaveCriticalSection(csp);
}
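/*
 * Illustrative sketch (an assumption, not part of the original source):
 * when other readers hold the lock, lock_ConvertRToW gives up this thread's
 * read hold and blocks, so another writer may run before the upgrade
 * completes.  Anything observed under the read lock should therefore be
 * revalidated after the upgrade.  The names lookupItem, itemStillValid and
 * modifyItem are hypothetical.
 */
#ifdef OSI_LOCK_EXAMPLES
static void example_Upgrade(osi_rwlock_t *lockp)
{
    lock_ObtainRead(lockp);
    /* item = lookupItem(); */
    lock_ConvertRToW(lockp);    /* may block; state can change meanwhile */
    /* if (itemStillValid(item)) modifyItem(item); */
    lock_ReleaseWrite(lockp);
}
#endif /* OSI_LOCK_EXAMPLES */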
void lock_ReleaseRead(osi_rwlock_t *lockp)
{
    long i;
    CRITICAL_SECTION *csp;
    osi_queue_t * lockRefH, *lockRefT;
    osi_lock_ref_t *lockRefp;
    DWORD tid = thrd_Current();

    if ((i = lockp->type) != 0) {
        if (i >= 0 && i < OSI_NLOCKTYPES)
            (osi_lockOps[i]->ReleaseReadProc)(lockp);
        return;
    }

    /* otherwise we're the fast base type */
    csp = &osi_baseAtomicCS[lockp->atomicIndex];
    EnterCriticalSection(csp);

    if (lockOrderValidation && lockp->level != 0) {
        int found = 0;
        lockRefH = (osi_queue_t *)TlsGetValue(tls_LockRefH);
        lockRefT = (osi_queue_t *)TlsGetValue(tls_LockRefT);

        for (lockRefp = (osi_lock_ref_t *)lockRefH; lockRefp;
             lockRefp = (osi_lock_ref_t *)osi_QNext(&lockRefp->q)) {
            if (lockRefp->type == OSI_LOCK_RW && lockRefp->rw == lockp) {
                osi_QRemoveHT(&lockRefH, &lockRefT, &lockRefp->q);
                lock_FreeLockRef(lockRefp);
                found = 1;
                break;
            }
        }

        osi_assertx(found, "read lock not found in TLS queue");

        TlsSetValue(tls_LockRefH, lockRefH);
        TlsSetValue(tls_LockRefT, lockRefT);
    }

    osi_assertx(lockp->readers > 0, "read lock not held");

    /* remove this thread's id from the reader list, shifting the rest down */
    for (i = 0; i < lockp->readers; i++) {
        if (lockp->tid[i] == tid) {
            for ( ; i < lockp->readers - 1; i++)
                lockp->tid[i] = lockp->tid[i+1];
            lockp->tid[i] = 0;
            break;
        }
    }

    lockp->readers--;

    /* releasing a read lock can allow writers */
    if (lockp->readers == 0 && lockp->waiters) {
        osi_TSignalForMLs(&lockp->d.turn, 0, csp);
    }
    else {
        osi_assertx(lockp->readers >= 0, "read lock underflow");

        /* and finally release the big lock */
        LeaveCriticalSection(csp);
    }
}