Bool MXUser_IsCurThreadHoldingRWLock(MXUserRWLock *lock, // IN: uint32 queryType) // IN: { HolderContext *myContext; ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW); myContext = MXUserGetHolderContext(lock); switch (queryType) { case MXUSER_RW_FOR_READ: return myContext->state == RW_LOCKED_FOR_READ; case MXUSER_RW_FOR_WRITE: return myContext->state == RW_LOCKED_FOR_WRITE; case MXUSER_RW_LOCKED: return myContext->state != RW_UNLOCKED; default: Panic("%s: unknown query type %d\n", __FUNCTION__, queryType); } }
/*
 * MXUserCondDestroyRecLock --
 *
 *      Drop a reference on the recursive lock; when the last reference is
 *      released, tear the lock down and free its storage.
 */

static void
MXUserCondDestroyRecLock(MXUserRecLock *lock)  // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (Atomic_ReadDec32(&lock->refCount) != 1) {
      return;  // other references remain; nothing to tear down yet
   }

   /* Last reference is gone - reclaim everything. */
   if (lock->vmmLock == NULL) {
      if (MXRecLockCount(&lock->recursiveLock) > 0) {
         MXUserDumpAndPanic(&lock->header,
                            "%s: Destroy of an acquired recursive lock\n",
                            __FUNCTION__);
      }

      MXRecLockDestroy(&lock->recursiveLock);
      MXUserRemoveFromList(&lock->header);

      if (vmx86_stats) {
         MXUserDisableStats(&lock->acquireStatsMem, &lock->heldStatsMem);
      }
   }

   lock->header.signature = 0;  // just in case...
   free(lock->header.name);
   lock->header.name = NULL;
   free(lock);
}
/*
 * MXUser_TryAcquireExclLock --
 *
 *      Attempt a non-blocking acquisition of the exclusive lock.
 *      Returns TRUE iff the lock was acquired.
 */

Bool
MXUser_TryAcquireExclLock(MXUserExclLock *lock)  // IN/OUT:
{
   Bool acquired;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   /* NOTE(review): presumably a fault-injection hook - forces a failure. */
   if (MXUserTryAcquireFail(lock->header.name)) {
      return FALSE;
   }

   acquired = MXRecLockTryAcquire(&lock->recursiveLock);

   if (acquired) {
      MXUserAcquisitionTracking(&lock->header, FALSE);

      /* An exclusive lock must never end up with a recursion count above 1. */
      if (vmx86_debug && (MXRecLockCount(&lock->recursiveLock) > 1)) {
         MXUserDumpAndPanic(&lock->header,
                            "%s: Acquire on an acquired exclusive lock\n",
                            __FUNCTION__);
      }
   }

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserAcquisitionSample(&stats->acquisitionStats, acquired,
                                 !acquired, 0ULL);
      }
   }

   return acquired;
}
/*
 * MXUser_TryDownSemaphore --
 *
 *      Attempt a non-blocking "down" (P) of the semaphore.
 *      Returns TRUE iff the down occurred; panics on internal errors.
 */

Bool
MXUser_TryDownSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   Bool gotIt = FALSE;
   int status;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   status = MXUserTryDown(&sema->nativeSemaphore, &gotIt);

   if (UNLIKELY(status != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, status);
   }

   if (vmx86_stats) {
      MXUserAcquireStats *acquireStats = Atomic_ReadPtr(&sema->acquireStatsMem);

      if (LIKELY(acquireStats != NULL)) {
         MXUserAcquisitionSample(&acquireStats->data, gotIt, !gotIt, 0ULL);
      }
   }

   Atomic_Dec(&sema->activeUserCount);

   return gotIt;
}
/*
 * MXUser_GetRecLockVmm --
 *
 *      Return the VMM (MX) lock bound to this recursive lock, or NULL when
 *      the lock is unbound.
 */

struct MX_MutexRec *
MXUser_GetRecLockVmm(MXUserRecLock *lock)  // IN:
{
   struct MX_MutexRec *vmmLock;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   vmmLock = lock->vmmLock;

   return vmmLock;
}
/*
 * MXUser_DecRefRecLock --
 *
 *      Release one reference on the recursive lock. The lock is destroyed
 *      when the final reference is dropped.
 */

void
MXUser_DecRefRecLock(MXUserRecLock *lock)  // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   MXUserCondDestroyRecLock(lock);
}
/*
 * MXUser_IsCurThreadHoldingExclLock --
 *
 *      Is the exclusive lock held by the calling thread?
 */

Bool
MXUser_IsCurThreadHoldingExclLock(MXUserExclLock *lock)  // IN:
{
   Bool isOwner;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   isOwner = MXRecLockIsOwner(&lock->recursiveLock);

   return isOwner;
}
/*
 * MXUser_CreateCondVarExclLock --
 *
 *      Create a condition variable for use with the given exclusive lock.
 */

MXUserCondVar *
MXUser_CreateCondVarExclLock(MXUserExclLock *lock)
{
   MXUserCondVar *condVar;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   condVar = MXUserCreateCondVar(&lock->header, &lock->recursiveLock);

   return condVar;
}
/*
 * MXUser_DumpRecLock --
 *
 *      Dump (log) the state of the recursive lock.
 */

void
MXUser_DumpRecLock(MXUserRecLock *lock)  // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   MXUserDumpRecLock(&lock->header);
}
/*
 * MXUser_GetRecLockRank --
 *
 *      Return the rank of the recursive lock.
 */

MX_Rank
MXUser_GetRecLockRank(MXUserRecLock *lock)  // IN:
{
   MX_Rank rank;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   rank = lock->header.rank;

   return rank;
}
/*
 * MXUser_IncRefRecLock --
 *
 *      Take an additional reference on the recursive lock.
 *      The caller must already hold at least one reference.
 */

void
MXUser_IncRefRecLock(MXUserRecLock *lock)  // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);  // must already be referenced

   Atomic_Inc(&lock->refCount);
}
/*
 * MXUser_DownSemaphore --
 *
 *      Perform a blocking "down" (P) of the semaphore. Panics on any
 *      internal semaphore failure.
 */

void
MXUser_DownSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   int err;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   MXUserAcquisitionTracking(&sema->header, TRUE);  // rank checking

   if (vmx86_stats) {
      VmTimeType start = 0;
      Bool tryDownSuccess = FALSE;
      MXUserAcquireStats *acquireStats;

      acquireStats = Atomic_ReadPtr(&sema->acquireStatsMem);

      /* Only pay for the timer read when stats are actually enabled. */
      if (LIKELY(acquireStats != NULL)) {
         start = Hostinfo_SystemTimerNS();
      }

      /*
       * Try the non-blocking path first; fall back to a blocking down only
       * when the try did not succeed. tryDownSuccess thus records whether
       * the acquisition was uncontended.
       */
      err = MXUserTryDown(&sema->nativeSemaphore, &tryDownSuccess);

      if (LIKELY(err == 0)) {
         if (!tryDownSuccess) {
            err = MXUserDown(&sema->nativeSemaphore);
         }
      }

      if (LIKELY((err == 0) && (acquireStats != NULL))) {
         MXUserHisto *histo;
         VmTimeType value = Hostinfo_SystemTimerNS() - start;

         /* !tryDownSuccess == the down contended (had to block). */
         MXUserAcquisitionSample(&acquireStats->data, TRUE, !tryDownSuccess,
                                 value);

         histo = Atomic_ReadPtr(&acquireStats->histo);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, value, GetReturnAddress());
         }
      }
   } else {
      err = MXUserDown(&sema->nativeSemaphore);
   }

   if (UNLIKELY(err != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, err);
   }

   MXUserReleaseTracking(&sema->header);

   Atomic_Dec(&sema->activeUserCount);
}
void MXUser_TimedWaitCondVarExclLock(MXUserExclLock *lock, // IN: MXUserCondVar *condVar, // IN: uint32 msecWait) // IN: { ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL); MXUserWaitCondVar(&lock->header, &lock->recursiveLock, condVar, msecWait); }
void MXUser_WaitCondVarExclLock(MXUserExclLock *lock, // IN: MXUserCondVar *condVar) // IN: { ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL); MXUserWaitCondVar(&lock->header, &lock->recursiveLock, condVar, MXUSER_WAIT_INFINITE); }
/*
 * MXUser_ReleaseRecLock --
 *
 *      Release (unlock) the recursive lock. A lock bound to a VMM lock is
 *      released through the VMM unlock hook; otherwise the native recursive
 *      lock is released, with optional held-time statistics and debug-build
 *      ownership checks.
 */

void
MXUser_ReleaseRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      /* Bound lock: the VMM side performs the release. */
      ASSERT(MXUserMX_UnlockRec);
      (*MXUserMX_UnlockRec)(lock->vmmLock);
   } else {
      if (vmx86_stats) {
         MXUserHeldStats *heldStats = Atomic_ReadPtr(&lock->heldStatsMem);

         /*
          * Sample held time only when the outermost hold is being released
          * (recursion count of 1).
          *
          * FIX: the original declared a second, shadowing heldStats here and
          * redundantly re-read lock->heldStatsMem; the already NULL-checked
          * outer pointer is used instead.
          */
         if (LIKELY(heldStats != NULL) &&
             (MXRecLockCount(&lock->recursiveLock) == 1)) {
            VmTimeType value = Hostinfo_SystemTimerNS() - heldStats->holdStart;
            MXUserHisto *histo = Atomic_ReadPtr(&heldStats->histo);

            MXUserBasicStatsSample(&heldStats->data, value);

            if (UNLIKELY(histo != NULL)) {
               MXUserHistoSample(histo, value, GetReturnAddress());
            }
         }
      }

      if (vmx86_debug) {
         if (MXRecLockCount(&lock->recursiveLock) == 0) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Release of an unacquired recursive lock\n",
                               __FUNCTION__);
         }

         if (!MXRecLockIsOwner(&lock->recursiveLock)) {
            /* FIX: message grammar ("an recursive" -> "a recursive"). */
            MXUserDumpAndPanic(&lock->header,
                               "%s: Non-owner release of a recursive lock\n",
                               __FUNCTION__);
         }
      }

      MXUserReleaseTracking(&lock->header);

      MXRecLockRelease(&lock->recursiveLock);
   }
}
void MXUser_TimedWaitCondVarRecLock(MXUserRecLock *lock, // IN: MXUserCondVar *condVar, // IN: uint32 msecWait) // IN: { ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC); ASSERT(lock->vmmLock == NULL); // only unbound locks MXUserWaitCondVar(&lock->header, &lock->recursiveLock, condVar, msecWait); }
void MXUser_WaitCondVarRecLock(MXUserRecLock *lock, // IN: MXUserCondVar *condVar) // IN: { ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC); ASSERT(lock->vmmLock == NULL); // only unbound locks MXUserWaitCondVar(&lock->header, &lock->recursiveLock, condVar, MXUSER_WAIT_INFINITE); }
/*
 * MXUser_DisableStatsRecLock --
 *
 *      Disable statistics collection on the recursive lock.
 *      Returns TRUE iff this is a stats-enabled build.
 */

Bool
MXUser_DisableStatsRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   if (vmx86_stats) {
      MXUserDisableStats(&lock->acquireStatsMem, &lock->heldStatsMem);
   }

   return vmx86_stats;  // FALSE in non-stats builds
}
/*
 * MXUser_CreateCondVarRecLock --
 *
 *      Create a condition variable for use with the given recursive lock.
 *      Valid only for unbound locks.
 */

MXUserCondVar *
MXUser_CreateCondVarRecLock(MXUserRecLock *lock)
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(lock->vmmLock == NULL);  // only unbound locks

   return MXUserCreateCondVar(&lock->header, &lock->recursiveLock);
}
/*
 * MXUser_AcquireRecLock --
 *
 *      Acquire (lock) the recursive lock. A lock bound to a VMM lock is
 *      acquired through the VMM lock hook; otherwise the native recursive
 *      lock is acquired, with optional acquisition/held-time statistics.
 */

void
MXUser_AcquireRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      /* Bound lock: delegate the acquisition to the VMM hook. */
      ASSERT(MXUserMX_LockRec);
      (*MXUserMX_LockRec)(lock->vmmLock);
   } else {
      /* Rank checking is only done on the first acquisition */
      MXUserAcquisitionTracking(&lock->header, TRUE);

      if (vmx86_stats) {
         VmTimeType value = 0;
         MXUserAcquireStats *acquireStats;

         acquireStats = Atomic_ReadPtr(&lock->acquireStatsMem);

         /* Request acquisition timing only when stats are enabled. */
         MXRecLockAcquire(&lock->recursiveLock,
                          (acquireStats == NULL) ? NULL : &value);

         if (LIKELY(acquireStats != NULL)) {
            /* Sample only the outermost acquisition (count of 1). */
            if (MXRecLockCount(&lock->recursiveLock) == 1) {
               MXUserHeldStats *heldStats;
               MXUserHisto *histo;

               /* Contended iff the wait exceeded the configured floor. */
               MXUserAcquisitionSample(&acquireStats->data, TRUE,
                               value > acquireStats->data.contentionDurationFloor,
                                       value);

               histo = Atomic_ReadPtr(&acquireStats->histo);

               if (UNLIKELY(histo != NULL)) {
                  MXUserHistoSample(histo, value, GetReturnAddress());
               }

               heldStats = Atomic_ReadPtr(&lock->heldStatsMem);

               if (UNLIKELY(heldStats != NULL)) {
                  /* Start the held-time clock read at release time. */
                  heldStats->holdStart = Hostinfo_SystemTimerNS();
               }
            }
         }
      } else {
         MXRecLockAcquire(&lock->recursiveLock, NULL);  // non-stats
      }
   }
}
/*
 * MXUser_EnableStatsRecLock --
 *
 *      Enable statistics collection on the recursive lock; each tracking
 *      kind is enabled only when its flag is TRUE.
 *      Returns TRUE iff this is a stats-enabled build.
 */

Bool
MXUser_EnableStatsRecLock(MXUserRecLock *lock,        // IN/OUT:
                          Bool trackAcquisitionTime,  // IN:
                          Bool trackHeldTime)         // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   if (vmx86_stats) {
      MXUserEnableStats(trackAcquisitionTime ? &lock->acquireStatsMem : NULL,
                        trackHeldTime ? &lock->heldStatsMem : NULL);
   }

   return vmx86_stats;  // FALSE in non-stats builds
}
/*
 * MXUser_DestroyRWLock --
 *
 *      Destroy the read-write lock and free all of its resources.
 *      A NULL lock is a no-op; destroying a lock that is still held
 *      panics.
 */

void
MXUser_DestroyRWLock(MXUserRWLock *lock)  // IN:
{
   if (LIKELY(lock != NULL)) {
      MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

      /* Refuse to destroy a lock that still has holders. */
      if (Atomic_Read(&lock->holderCount) != 0) {
         MXUserDumpAndPanic(&lock->header,
                            "%s: Destroy on an acquired read-write lock\n",
                            __FUNCTION__);
      }

      if (LIKELY(lock->useNative)) {
         int err = MXUserNativeRWDestroy(&lock->nativeLock);

         if (UNLIKELY(err != 0)) {
            MXUserDumpAndPanic(&lock->header, "%s: Internal error (%d)\n",
                               __FUNCTION__, err);
         }
      }

      lock->header.signature = 0;  // just in case...

      MXRecLockDestroy(&lock->recursiveLock);

      MXUserRemoveFromList(&lock->header);

      if (vmx86_stats) {
         MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

         if (LIKELY(stats != NULL)) {
            /* Tear down both stats sets and their histograms. */
            MXUserAcquisitionStatsTearDown(&stats->acquisitionStats);
            MXUserHistoTearDown(Atomic_ReadPtr(&stats->acquisitionHisto));

            MXUserBasicStatsTearDown(&stats->heldStats);
            MXUserHistoTearDown(Atomic_ReadPtr(&stats->heldHisto));

            free(stats);
         }
      }

      HashTable_FreeUnsafe(lock->holderTable);
      free(lock->header.name);
      lock->header.name = NULL;
      free(lock);
   }
}
Bool MXUser_SetContentionCountFloorRecLock(MXUserRecLock *lock, // IN/OUT: uint64 count) // IN: { Bool result; ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC); if (vmx86_stats) { result = MXUserSetContentionCountFloor(&lock->acquireStatsMem, count); } else { result = FALSE; } return result; }
/*
 * MXUser_IsCurThreadHoldingRecLock --
 *
 *      Is the recursive lock held by the calling thread? A lock bound to a
 *      VMM lock delegates the query to the VMM hook.
 */

Bool
MXUser_IsCurThreadHoldingRecLock(MXUserRecLock *lock)  // IN:
{
   Bool held;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      ASSERT(MXUserMX_IsLockedByCurThreadRec);
      held = (*MXUserMX_IsLockedByCurThreadRec)(lock->vmmLock);
   } else {
      held = MXRecLockIsOwner(&lock->recursiveLock);
   }

   return held;
}
/*
 * MXUser_UpSemaphore --
 *
 *      Perform an "up" (V) of the semaphore. Panics on any internal
 *      semaphore failure.
 */

void
MXUser_UpSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   int status;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   status = MXUserUp(&sema->nativeSemaphore);

   if (UNLIKELY(status != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, status);
   }

   Atomic_Dec(&sema->activeUserCount);
}
/*
 * MXUser_DestroySemaphore --
 *
 *      Destroy the semaphore and free all of its resources.
 *      A NULL semaphore is a no-op; destroying a semaphore with active
 *      users panics.
 */

void
MXUser_DestroySemaphore(MXUserSemaphore *sema)  // IN:
{
   int err;

   if (sema == NULL) {
      return;  // destroying NULL is a no-op
   }

   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   if (Atomic_Read(&sema->activeUserCount) != 0) {
      MXUserDumpAndPanic(&sema->header,
                         "%s: Attempted destroy on semaphore while in use\n",
                         __FUNCTION__);
   }

   sema->header.signature = 0;  // just in case...

   err = MXUserDestroy(&sema->nativeSemaphore);

   if (UNLIKELY(err != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, err);
   }

   MXUserRemoveFromList(&sema->header);

   if (vmx86_stats) {
      MXUserAcquireStats *acquireStats = Atomic_ReadPtr(&sema->acquireStatsMem);

      if (LIKELY(acquireStats != NULL)) {
         MXUserAcquisitionStatsTearDown(&acquireStats->data);
         MXUserHistoTearDown(Atomic_ReadPtr(&acquireStats->histo));

         free(acquireStats);
      }
   }

   free(sema->header.name);
   sema->header.name = NULL;

   free(sema);
}
/*
 * MXUser_TryAcquireRecLock --
 *
 *      Attempt a non-blocking acquisition of the recursive lock. A lock
 *      bound to a VMM lock delegates to the VMM try-lock hook.
 *      Returns TRUE iff the lock was acquired.
 */

Bool
MXUser_TryAcquireRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   Bool success;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      ASSERT(MXUserMX_TryLockRec);
      success = (*MXUserMX_TryLockRec)(lock->vmmLock);
   } else if (MXUserTryAcquireFail(lock->header.name)) {
      /* Forced failure; no stats are sampled (matches the original goto). */
      success = FALSE;
   } else {
      success = MXRecLockTryAcquire(&lock->recursiveLock);

      if (success) {
         MXUserAcquisitionTracking(&lock->header, FALSE);
      }

      if (vmx86_stats) {
         MXUserAcquireStats *acquireStats;

         acquireStats = Atomic_ReadPtr(&lock->acquireStatsMem);

         if (LIKELY(acquireStats != NULL)) {
            MXUserAcquisitionSample(&acquireStats->data, success, !success,
                                    0ULL);
         }
      }
   }

   return success;
}
/*
 * MXUser_AcquireExclLock --
 *
 *      Acquire (lock) the exclusive lock, with optional acquisition
 *      statistics. Panics in debug builds on a recursive acquisition.
 */

void
MXUser_AcquireExclLock(MXUserExclLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   MXUserAcquisitionTracking(&lock->header, TRUE);  // rank checking

   if (vmx86_stats) {
      VmTimeType value = 0;
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      /* Request acquisition timing only when stats are enabled. */
      MXRecLockAcquire(&lock->recursiveLock,
                       (stats == NULL) ? NULL : &value);

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;

         /* A non-zero wait time is treated as contention. */
         MXUserAcquisitionSample(&stats->acquisitionStats, TRUE,
                                 value != 0, value);

         histo = Atomic_ReadPtr(&stats->acquisitionHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, value, GetReturnAddress());
         }

         /* Start the held-time clock read at release time. */
         stats->holdStart = Hostinfo_SystemTimerNS();
      }
   } else {
      MXRecLockAcquire(&lock->recursiveLock, NULL);  // non-stats
   }

   /* An exclusive lock must never be acquired recursively. */
   if (vmx86_debug && (MXRecLockCount(&lock->recursiveLock) > 1)) {
      MXUserDumpAndPanic(&lock->header,
                         "%s: Acquire on an acquired exclusive lock\n",
                         __FUNCTION__);
   }
}
/*
 * MXUser_DestroyExclLock --
 *
 *      Destroy the exclusive lock and free all of its resources.
 *      A NULL lock is a no-op; destroying a lock that is still held panics.
 */

void
MXUser_DestroyExclLock(MXUserExclLock *lock)  // IN:
{
   if (lock == NULL) {
      return;  // destroying NULL is a no-op
   }

   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   if (MXRecLockCount(&lock->recursiveLock) > 0) {
      MXUserDumpAndPanic(&lock->header,
                         "%s: Destroy of an acquired exclusive lock\n",
                         __FUNCTION__);
   }

   lock->header.signature = 0;  // just in case...

   MXRecLockDestroy(&lock->recursiveLock);
   MXUserRemoveFromList(&lock->header);

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserAcquisitionStatsTearDown(&stats->acquisitionStats);
         MXUserHistoTearDown(Atomic_ReadPtr(&stats->acquisitionHisto));

         MXUserBasicStatsTearDown(&stats->heldStats);
         MXUserHistoTearDown(Atomic_ReadPtr(&stats->heldHisto));

         free(stats);
      }
   }

   free(lock->header.name);
   lock->header.name = NULL;

   free(lock);
}
/*
 * MXUser_ReleaseExclLock --
 *
 *      Release (unlock) the exclusive lock, sampling held-time statistics
 *      when enabled. Panics in debug builds on a non-owner release.
 */

void
MXUser_ReleaseExclLock(MXUserExclLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;

         /* Held time: now minus the clock started at acquisition. */
         VmTimeType value = Hostinfo_SystemTimerNS() - stats->holdStart;

         MXUserBasicStatsSample(&stats->heldStats, value);

         histo = Atomic_ReadPtr(&stats->heldHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, value, GetReturnAddress());
         }
      }
   }

   if (vmx86_debug && !MXRecLockIsOwner(&lock->recursiveLock)) {
      int lockCount = MXRecLockCount(&lock->recursiveLock);

      /* Distinguish "never acquired" from "held by some other thread". */
      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s exclusive lock\n",
                         __FUNCTION__,
                         lockCount == 0 ? "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   MXRecLockRelease(&lock->recursiveLock);
}