/*
 * MXUser_ReleaseRecLock --
 *
 *      Release (unlock) a recursive lock.
 *
 *      Fixes vs. prior version:
 *      - Removed an inner declaration of heldStats that shadowed the outer
 *        variable and redundantly re-read lock->heldStatsMem (the second,
 *        UNLIKELY-annotated read contradicted the outer LIKELY read of the
 *        same atomic pointer). One read suffices; behavior is unchanged.
 *      - Corrected the grammar of the non-owner panic message
 *        ("an recursive" -> "a recursive").
 */

void
MXUser_ReleaseRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      /* Lock is bound to a VMM lock; delegate the release entirely. */
      ASSERT(MXUserMX_UnlockRec);
      (*MXUserMX_UnlockRec)(lock->vmmLock);
   } else {
      if (vmx86_stats) {
         MXUserHeldStats *heldStats = Atomic_ReadPtr(&lock->heldStatsMem);

         /*
          * Sample the held time only when the outermost acquisition is
          * being released (recursion count of 1).
          */

         if (LIKELY(heldStats != NULL) &&
             (MXRecLockCount(&lock->recursiveLock) == 1)) {
            VmTimeType value;
            MXUserHisto *histo = Atomic_ReadPtr(&heldStats->histo);

            value = Hostinfo_SystemTimerNS() - heldStats->holdStart;

            MXUserBasicStatsSample(&heldStats->data, value);

            if (UNLIKELY(histo != NULL)) {
               MXUserHistoSample(histo, value, GetReturnAddress());
            }
         }
      }

      if (vmx86_debug) {
         /* Catch both misuse patterns before actually releasing. */
         if (MXRecLockCount(&lock->recursiveLock) == 0) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Release of an unacquired recursive lock\n",
                               __FUNCTION__);
         }

         if (!MXRecLockIsOwner(&lock->recursiveLock)) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Non-owner release of a recursive lock\n",
                               __FUNCTION__);
         }
      }

      MXUserReleaseTracking(&lock->header);

      MXRecLockRelease(&lock->recursiveLock);
   }
}
/*
 * MXUserAddToList --
 *
 *      Append a lock header to the global MXUser lock list, under the
 *      internal singleton list lock.
 *
 *      A failure to obtain the list lock is silently tolerated; this code
 *      is too low-level to log.
 */

void
MXUserAddToList(MXUserHeader *header)  // IN/OUT:
{
   MXRecLock *theListLock = MXUserInternalSingleton(&mxLockMemPtr);

   if (theListLock == NULL) {
      return;  // Tolerate the failure; too low down to log
   }

   MXRecLockAcquire(theListLock, NULL);  // non-stats
   LIST_QUEUE(&header->item, &mxUserLockList);
   MXRecLockRelease(theListLock);
}
/*
 * MXUser_ReleaseExclLock --
 *
 *      Release (unlock) an exclusive lock. When statistics are enabled,
 *      the held time is sampled before the underlying recursive lock is
 *      dropped; in debug builds a non-owner release panics.
 */

void
MXUser_ReleaseExclLock(MXUserExclLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   if (vmx86_stats) {
      MXUserStats *lockStats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(lockStats != NULL)) {
         VmTimeType heldTime = Hostinfo_SystemTimerNS() -
                               lockStats->holdStart;
         MXUserHisto *heldHisto = Atomic_ReadPtr(&lockStats->heldHisto);

         MXUserBasicStatsSample(&lockStats->heldStats, heldTime);

         if (UNLIKELY(heldHisto != NULL)) {
            MXUserHistoSample(heldHisto, heldTime, GetReturnAddress());
         }
      }
   }

   if (vmx86_debug && !MXRecLockIsOwner(&lock->recursiveLock)) {
      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s exclusive lock\n",
                         __FUNCTION__,
                         (MXRecLockCount(&lock->recursiveLock) == 0) ?
                            "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   MXRecLockRelease(&lock->recursiveLock);
}
/*
 * MXUserWaitInternal --
 *
 *      Windows implementation of a condition-variable wait on an MXUser
 *      recursive lock.
 *
 *      Two paths exist:
 *
 *      1) Native (Vista+): SleepConditionVariableCS atomically releases the
 *         native lock, waits, and reacquires it. MXUser's recursion count
 *         must be manually zeroed before and restored after, since the
 *         native call bypasses MXUser's accounting.
 *
 *      2) Compat (pre-Vista): an event + critical-section emulation. The
 *         lock is dropped to a count of one and released, then the thread
 *         waits in a retry loop; a WAIT_OBJECT_0 wakeup with no pending
 *         release slot (numForRelease == 0) is treated as spurious and the
 *         wait is retried. The lock is reacquired and its count restored
 *         before returning.
 *
 *      msecWait is either a millisecond timeout or MXUSER_WAIT_INFINITE.
 *      Any failure in the compat path Panics.
 */

static INLINE void
MXUserWaitInternal(MXRecLock *lock,         // IN:
                   MXUserCondVar *condVar,  // IN:
                   uint32 msecWait)         // IN:
{
   int lockCount = MXRecLockCount(lock);
   DWORD waitTime = (msecWait == MXUSER_WAIT_INFINITE) ? INFINITE : msecWait;

   if (pSleepConditionVariableCS) {
      /*
       * When using the native lock found within the MXUser lock, be sure to
       * decrement the count before the wait/sleep and increment it after the
       * wait/sleep - the (native) wait/sleep will perform a lock release
       * before the wait/sleep and a lock acquisition after the wait/sleep.
       * The MXUser internal accounting information must be maintained.
       */

      MXRecLockDecCount(lock, lockCount);
      (*pSleepConditionVariableCS)(&condVar->x.condObject, &lock->nativeLock,
                                   waitTime);
      MXRecLockIncCount(lock, lockCount);
   } else {
      DWORD err;
      Bool done = FALSE;

      /* Register as a waiter under the condvar's internal lock. */
      EnterCriticalSection(&condVar->x.compat.condVarLock);
      condVar->x.compat.numWaiters++;
      LeaveCriticalSection(&condVar->x.compat.condVarLock);

      /* Drop the MXUser lock down to a single count, then release it. */
      MXRecLockDecCount(lock, lockCount - 1);
      MXRecLockRelease(lock);

      do {
         DWORD status = WaitForSingleObject(condVar->x.compat.signalEvent,
                                            waitTime);

         EnterCriticalSection(&condVar->x.compat.condVarLock);

         ASSERT(condVar->x.compat.numWaiters > 0);

         if (status == WAIT_OBJECT_0) {
            /*
             * Only consume a wakeup if a release slot is available;
             * otherwise this is a spurious wakeup and the wait is retried
             * (done stays FALSE).
             */
            if (condVar->x.compat.numForRelease > 0) {
               condVar->x.compat.numWaiters--;

               /* Last released waiter rearms (resets) the event. */
               if (--condVar->x.compat.numForRelease == 0) {
                  ResetEvent(condVar->x.compat.signalEvent);
               }

               err = ERROR_SUCCESS;
               done = TRUE;
            }
         } else {
            condVar->x.compat.numWaiters--;

            if (status == WAIT_TIMEOUT) {
               if (msecWait == MXUSER_WAIT_INFINITE) {
                  /* An infinite wait must never time out. */
                  err = ERROR_CALL_NOT_IMPLEMENTED;  // ACK! "IMPOSSIBLE"
               } else {
                  /* Timing out a bounded wait is a normal outcome. */
                  err = ERROR_SUCCESS;
               }
            } else if (status == WAIT_ABANDONED) {
               err = ERROR_WAIT_NO_CHILDREN;
            } else {
               ASSERT(status == WAIT_FAILED);
               err = GetLastError();
            }

            done = TRUE;
         }

         LeaveCriticalSection(&condVar->x.compat.condVarLock);
      } while (!done);

      /* Reacquire the lock and restore the original recursion count. */
      MXRecLockAcquire(lock, NULL);  // non-stats
      MXRecLockIncCount(lock, lockCount - 1);

      if (err != ERROR_SUCCESS) {
         Panic("%s: failure %d on condVar (0x%p; %s)\n", __FUNCTION__, err,
               condVar, condVar->header->name);
      }
   }
}
/*
 * MXUser_ReleaseRWLock --
 *
 *      Release (unlock) a read-write lock, for either a read or a write
 *      acquisition (the per-thread holder context records which).
 *
 *      Order matters throughout: held-time statistics are sampled before
 *      the lock is released, the holder count is decremented before the
 *      native release, and the holder context is marked RW_UNLOCKED last.
 */

void
MXUser_ReleaseRWLock(MXUserRWLock *lock)  // IN/OUT:
{
   HolderContext *myContext;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

   myContext = MXUserGetHolderContext(lock);

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;
         VmTimeType duration = Hostinfo_SystemTimerNS() -
                               myContext->holdStart;

         /*
          * The statistics are not always atomically safe so protect them
          * when necessary
          */

         /*
          * Readers on the native lock may release concurrently, so the
          * shared statistics are serialized via the recursive lock; a
          * writer (or the non-native path) already holds exclusively.
          */
         if ((myContext->state == RW_LOCKED_FOR_READ) && lock->useNative) {
            MXRecLockAcquire(&lock->recursiveLock,
                             NULL);  // non-stats
         }

         MXUserBasicStatsSample(&stats->heldStats, duration);

         histo = Atomic_ReadPtr(&stats->heldHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, duration, GetReturnAddress());
         }

         if ((myContext->state == RW_LOCKED_FOR_READ) && lock->useNative) {
            MXRecLockRelease(&lock->recursiveLock);
         }
      }
   }

   /* This thread never acquired the lock: misuse - panic. */
   if (UNLIKELY(myContext->state == RW_UNLOCKED)) {
      uint32 lockCount = Atomic_Read(&lock->holderCount);

      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s read-write lock\n",
                         __FUNCTION__,
                         lockCount == 0 ? "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   Atomic_Dec(&lock->holderCount);

   if (LIKELY(lock->useNative)) {
      int err = MXUserNativeRWRelease(&lock->nativeLock,
                                      myContext->state == RW_LOCKED_FOR_READ);

      if (UNLIKELY(err != 0)) {
         MXUserDumpAndPanic(&lock->header, "%s: Internal error (%d)\n",
                            __FUNCTION__, err);
      }
   } else {
      /* Non-native path is exclusive-only: no other holders may remain. */
      ASSERT(Atomic_Read(&lock->holderCount) == 0);
      MXRecLockRelease(&lock->recursiveLock);
   }

   myContext->state = RW_UNLOCKED;
}
/*
 * MXUserAcquisition --
 *
 *      Common acquisition path for a read-write lock, for both read
 *      (forRead == TRUE) and write acquisitions.
 *
 *      Recursive acquisition by the same thread is detected via the
 *      per-thread holder context and panics. When statistics are enabled,
 *      the contended-acquisition time is measured and sampled; on the
 *      native path with concurrent readers, the shared statistics are
 *      serialized via the recursive lock.
 */

static INLINE void
MXUserAcquisition(MXUserRWLock *lock,  // IN/OUT:
                  Bool forRead)        // IN:
{
   HolderContext *myContext;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

   MXUserAcquisitionTracking(&lock->header, TRUE);

   myContext = MXUserGetHolderContext(lock);

   /* This thread already holds the lock: recursive use is misuse - panic. */
   if (UNLIKELY(myContext->state != RW_UNLOCKED)) {
      MXUserDumpAndPanic(&lock->header,
                         "%s: AcquireFor%s after AcquireFor%s\n",
                         __FUNCTION__,
                         forRead ? "Read" : "Write",
                         (myContext->state == RW_LOCKED_FOR_READ) ? "Read" :
                                                                    "Write");
   }

   if (vmx86_stats) {
      VmTimeType value;
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (lock->useNative) {
         int err = 0;
         Bool contended;
         VmTimeType begin = Hostinfo_SystemTimerNS();

         contended = MXUserNativeRWAcquire(&lock->nativeLock, forRead, &err);

         /* Only a contended acquisition contributes a non-zero time. */
         value = contended ? Hostinfo_SystemTimerNS() - begin : 0;

         if (UNLIKELY(err != 0)) {
            MXUserDumpAndPanic(&lock->header, "%s: Error %d: contended %d\n",
                               __FUNCTION__, err, contended);
         }
      } else {
         value = 0;

         /* Recursive-lock path measures the acquisition time itself. */
         MXRecLockAcquire(&lock->recursiveLock,
                          (stats == NULL) ? NULL : &value);
      }

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;

         /*
          * The statistics are not atomically safe so protect them when
          * necessary.
          */

         /* Concurrent readers require serializing the shared statistics. */
         if (forRead && lock->useNative) {
            MXRecLockAcquire(&lock->recursiveLock,
                             NULL);  // non-stats
         }

         MXUserAcquisitionSample(&stats->acquisitionStats, TRUE, value != 0,
                                 value);

         histo = Atomic_ReadPtr(&stats->acquisitionHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, value, GetReturnAddress());
         }

         if (forRead && lock->useNative) {
            MXRecLockRelease(&lock->recursiveLock);
         }

         /* Record when the hold began, for held-time stats at release. */
         myContext->holdStart = Hostinfo_SystemTimerNS();
      }
   } else {
      if (LIKELY(lock->useNative)) {
         int err = 0;

         MXUserNativeRWAcquire(&lock->nativeLock, forRead, &err);

         if (UNLIKELY(err != 0)) {
            MXUserDumpAndPanic(&lock->header, "%s: Error %d\n",
                               __FUNCTION__, err);
         }
      } else {
         MXRecLockAcquire(&lock->recursiveLock,
                          NULL);  // non-stats
      }
   }

   /* Writers (and the non-native path) must be the sole holder. */
   if (!forRead || !lock->useNative) {
      ASSERT(Atomic_Read(&lock->holderCount) == 0);
   }

   Atomic_Inc(&lock->holderCount);
   myContext->state = forRead ? RW_LOCKED_FOR_READ : RW_LOCKED_FOR_WRITE;
}