void
MXUser_DownSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   int err;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   MXUserAcquisitionTracking(&sema->header, TRUE);  // rank checking

   if (vmx86_stats) {
      /*
       * Stats path: attempt an uncontended "try down" first so the sample
       * can record whether the caller had to block, then fall back to a
       * blocking down when the try fails.
       */

      Bool quickAcquire = FALSE;
      VmTimeType begin = 0;
      MXUserAcquireStats *stats = Atomic_ReadPtr(&sema->acquireStatsMem);

      if (LIKELY(stats != NULL)) {
         begin = Hostinfo_SystemTimerNS();
      }

      err = MXUserTryDown(&sema->nativeSemaphore, &quickAcquire);

      if (LIKELY(err == 0) && !quickAcquire) {
         err = MXUserDown(&sema->nativeSemaphore);
      }

      if (LIKELY((err == 0) && (stats != NULL))) {
         MXUserHisto *histo;
         VmTimeType elapsed = Hostinfo_SystemTimerNS() - begin;

         /* A failed try-down means the acquisition was contended */
         MXUserAcquisitionSample(&stats->data, TRUE, !quickAcquire, elapsed);

         histo = Atomic_ReadPtr(&stats->histo);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, elapsed, GetReturnAddress());
         }
      }
   } else {
      err = MXUserDown(&sema->nativeSemaphore);
   }

   if (UNLIKELY(err != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, err);
   }

   MXUserReleaseTracking(&sema->header);

   Atomic_Dec(&sema->activeUserCount);
}
static int MXUserTimedDown(NativeSemaphore *sema, // IN: uint32 msecWait, // IN: Bool *downOccurred) // OUT: { uint64 nsecWait; VmTimeType before; kern_return_t err; ASSERT_ON_COMPILE(KERN_SUCCESS == 0); /* * Work in nanoseconds. Time the semaphore_timedwait operation in case * it is interrupted (KERN_ABORT). If it is, determine how much time is * necessary to fulfill the specified wait time and retry with a new * and appropriate timeout. */ nsecWait = 1000000ULL * (uint64) msecWait; before = Hostinfo_SystemTimerNS(); do { VmTimeType after; mach_timespec_t ts; ts.tv_sec = nsecWait / MXUSER_A_BILLION; ts.tv_nsec = nsecWait % MXUSER_A_BILLION; err = semaphore_timedwait(*sema, ts); after = Hostinfo_SystemTimerNS(); if (err == KERN_SUCCESS) { *downOccurred = TRUE; } else { *downOccurred = FALSE; if (err == KERN_OPERATION_TIMED_OUT) { /* Really timed out; no down occurred, no error */ err = KERN_SUCCESS; } else { if (err == KERN_ABORTED) { VmTimeType duration = after - before; if (duration < nsecWait) { nsecWait -= duration; before = after; } else { err = KERN_SUCCESS; // "timed out" anyway... no error } } } } } while (nsecWait && (err == KERN_ABORTED)); return err; }
void
MXUser_ReleaseRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      /* Lock is bound to a VMM lock; delegate the release. */
      ASSERT(MXUserMX_UnlockRec);
      (*MXUserMX_UnlockRec)(lock->vmmLock);
   } else {
      if (vmx86_stats) {
         /*
          * Record hold-time statistics only when the outermost (last)
          * acquisition is being released; inner recursive releases do not
          * end the hold. A single read of heldStatsMem suffices (the
          * previous code redundantly re-read it into a shadowing inner
          * variable).
          */
         MXUserHeldStats *heldStats = Atomic_ReadPtr(&lock->heldStatsMem);

         if (LIKELY(heldStats != NULL) &&
             (MXRecLockCount(&lock->recursiveLock) == 1)) {
            VmTimeType value;
            MXUserHisto *histo = Atomic_ReadPtr(&heldStats->histo);

            value = Hostinfo_SystemTimerNS() - heldStats->holdStart;

            MXUserBasicStatsSample(&heldStats->data, value);

            if (UNLIKELY(histo != NULL)) {
               MXUserHistoSample(histo, value, GetReturnAddress());
            }
         }
      }

      if (vmx86_debug) {
         if (MXRecLockCount(&lock->recursiveLock) == 0) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Release of an unacquired recursive lock\n",
                               __FUNCTION__);
         }

         if (!MXRecLockIsOwner(&lock->recursiveLock)) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Non-owner release of a recursive lock\n",
                               __FUNCTION__);
         }
      }

      MXUserReleaseTracking(&lock->header);

      MXRecLockRelease(&lock->recursiveLock);
   }
}
void
MXUser_AcquireRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);

   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      /* Lock is bound to a VMM lock; delegate the acquisition. */
      ASSERT(MXUserMX_LockRec);
      (*MXUserMX_LockRec)(lock->vmmLock);
      return;
   }

   /* Rank checking is only done on the first acquisition */
   MXUserAcquisitionTracking(&lock->header, TRUE);

   if (!vmx86_stats) {
      MXRecLockAcquire(&lock->recursiveLock, NULL);  // non-stats
      return;
   }

   {
      VmTimeType waited = 0;
      MXUserAcquireStats *aStats = Atomic_ReadPtr(&lock->acquireStatsMem);

      MXRecLockAcquire(&lock->recursiveLock,
                       (aStats == NULL) ? NULL : &waited);

      /* Acquisition stats are only sampled on the outermost acquisition */
      if (LIKELY(aStats != NULL) &&
          (MXRecLockCount(&lock->recursiveLock) == 1)) {
         MXUserHisto *histo;
         MXUserHeldStats *hStats;

         MXUserAcquisitionSample(&aStats->data, TRUE,
                                 waited > aStats->data.contentionDurationFloor,
                                 waited);

         histo = Atomic_ReadPtr(&aStats->histo);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, waited, GetReturnAddress());
         }

         hStats = Atomic_ReadPtr(&lock->heldStatsMem);

         if (UNLIKELY(hStats != NULL)) {
            /* Start timing the hold for the held-time statistics */
            hStats->holdStart = Hostinfo_SystemTimerNS();
         }
      }
   }
}
void
MXUser_AcquireExclLock(MXUserExclLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   MXUserAcquisitionTracking(&lock->header, TRUE);

   if (vmx86_stats) {
      VmTimeType waitTime = 0;
      MXUserStats *lockStats = Atomic_ReadPtr(&lock->statsMem);

      /* Only bother timing the acquisition when stats are enabled */
      MXRecLockAcquire(&lock->recursiveLock,
                       (lockStats == NULL) ? NULL : &waitTime);

      if (LIKELY(lockStats != NULL)) {
         MXUserHisto *histo;

         /* A non-zero wait means the acquisition was contended */
         MXUserAcquisitionSample(&lockStats->acquisitionStats, TRUE,
                                 waitTime != 0, waitTime);

         histo = Atomic_ReadPtr(&lockStats->acquisitionHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, waitTime, GetReturnAddress());
         }

         /* Start timing the hold for the held-time statistics */
         lockStats->holdStart = Hostinfo_SystemTimerNS();
      }
   } else {
      MXRecLockAcquire(&lock->recursiveLock, NULL);  // non-stats
   }

   /* An exclusive lock must never be acquired recursively */
   if (vmx86_debug && (MXRecLockCount(&lock->recursiveLock) > 1)) {
      MXUserDumpAndPanic(&lock->header,
                         "%s: Acquire on an acquired exclusive lock\n",
                         __FUNCTION__);
   }
}
void
MXUser_ReleaseExclLock(MXUserExclLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_EXCL);

   if (vmx86_stats) {
      MXUserStats *lockStats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(lockStats != NULL)) {
         MXUserHisto *histo;
         VmTimeType heldTime = Hostinfo_SystemTimerNS() -
                               lockStats->holdStart;

         MXUserBasicStatsSample(&lockStats->heldStats, heldTime);

         histo = Atomic_ReadPtr(&lockStats->heldHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, heldTime, GetReturnAddress());
         }
      }
   }

   /* Only the owning thread may release an exclusive lock */
   if (vmx86_debug && !MXRecLockIsOwner(&lock->recursiveLock)) {
      int lockCount = MXRecLockCount(&lock->recursiveLock);

      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s exclusive lock\n",
                         __FUNCTION__,
                         lockCount == 0 ? "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   MXRecLockRelease(&lock->recursiveLock);
}
void
MXUser_ReleaseRWLock(MXUserRWLock *lock)  // IN/OUT:
{
   HolderContext *ctx;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

   ctx = MXUserGetHolderContext(lock);

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;
         VmTimeType heldTime = Hostinfo_SystemTimerNS() - ctx->holdStart;

         /*
          * The statistics are not always atomically safe so protect them
          * when necessary: multiple readers of a native lock may release
          * concurrently.
          */

         Bool serialize = (ctx->state == RW_LOCKED_FOR_READ) &&
                          lock->useNative;

         if (serialize) {
            MXRecLockAcquire(&lock->recursiveLock, NULL);  // non-stats
         }

         MXUserBasicStatsSample(&stats->heldStats, heldTime);

         histo = Atomic_ReadPtr(&stats->heldHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, heldTime, GetReturnAddress());
         }

         if (serialize) {
            MXRecLockRelease(&lock->recursiveLock);
         }
      }
   }

   if (UNLIKELY(ctx->state == RW_UNLOCKED)) {
      uint32 holders = Atomic_Read(&lock->holderCount);

      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s read-write lock\n",
                         __FUNCTION__,
                         holders == 0 ? "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   Atomic_Dec(&lock->holderCount);

   if (LIKELY(lock->useNative)) {
      int err = MXUserNativeRWRelease(&lock->nativeLock,
                                      ctx->state == RW_LOCKED_FOR_READ);

      if (UNLIKELY(err != 0)) {
         MXUserDumpAndPanic(&lock->header, "%s: Internal error (%d)\n",
                            __FUNCTION__, err);
      }
   } else {
      ASSERT(Atomic_Read(&lock->holderCount) == 0);

      MXRecLockRelease(&lock->recursiveLock);
   }

   ctx->state = RW_UNLOCKED;
}
static INLINE void MXUserAcquisition(MXUserRWLock *lock, // IN/OUT: Bool forRead) // IN: { HolderContext *myContext; ASSERT(lock); MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW); MXUserAcquisitionTracking(&lock->header, TRUE); myContext = MXUserGetHolderContext(lock); if (UNLIKELY(myContext->state != RW_UNLOCKED)) { MXUserDumpAndPanic(&lock->header, "%s: AcquireFor%s after AcquireFor%s\n", __FUNCTION__, forRead ? "Read" : "Write", (myContext->state == RW_LOCKED_FOR_READ) ? "Read" : "Write"); } if (vmx86_stats) { VmTimeType value; MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem); if (lock->useNative) { int err = 0; Bool contended; VmTimeType begin = Hostinfo_SystemTimerNS(); contended = MXUserNativeRWAcquire(&lock->nativeLock, forRead, &err); value = contended ? Hostinfo_SystemTimerNS() - begin : 0; if (UNLIKELY(err != 0)) { MXUserDumpAndPanic(&lock->header, "%s: Error %d: contended %d\n", __FUNCTION__, err, contended); } } else { value = 0; MXRecLockAcquire(&lock->recursiveLock, (stats == NULL) ? NULL : &value); } if (LIKELY(stats != NULL)) { MXUserHisto *histo; /* * The statistics are not atomically safe so protect them when * necessary. 
*/ if (forRead && lock->useNative) { MXRecLockAcquire(&lock->recursiveLock, NULL); // non-stats } MXUserAcquisitionSample(&stats->acquisitionStats, TRUE, value != 0, value); histo = Atomic_ReadPtr(&stats->acquisitionHisto); if (UNLIKELY(histo != NULL)) { MXUserHistoSample(histo, value, GetReturnAddress()); } if (forRead && lock->useNative) { MXRecLockRelease(&lock->recursiveLock); } myContext->holdStart = Hostinfo_SystemTimerNS(); } } else { if (LIKELY(lock->useNative)) { int err = 0; MXUserNativeRWAcquire(&lock->nativeLock, forRead, &err); if (UNLIKELY(err != 0)) { MXUserDumpAndPanic(&lock->header, "%s: Error %d\n", __FUNCTION__, err); } } else { MXRecLockAcquire(&lock->recursiveLock, NULL); // non-stats } } if (!forRead || !lock->useNative) { ASSERT(Atomic_Read(&lock->holderCount) == 0); } Atomic_Inc(&lock->holderCount); myContext->state = forRead ? RW_LOCKED_FOR_READ : RW_LOCKED_FOR_WRITE; }