Example #1
static void
MXUserCondDestroyRecLock(MXUserRecLock *lock)  // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (Atomic_ReadDec32(&lock->refCount) == 1) {
      if (lock->vmmLock == NULL) {
         if (MXRecLockCount(&lock->recursiveLock) > 0) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Destroy of an acquired recursive lock\n",
                               __FUNCTION__);
         }

         MXRecLockDestroy(&lock->recursiveLock);

         MXUserRemoveFromList(&lock->header);

         if (vmx86_stats) {
            MXUserDisableStats(&lock->acquireStatsMem, &lock->heldStatsMem);
         }
      }

      lock->header.signature = 0;  // just in case...
      free(lock->header.name);
      lock->header.name = NULL;
      free(lock);
   }
}
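The destroy path above is gated by a shared reference count: Atomic_ReadDec32 returns the pre-decrement value, so comparing it against 1 identifies the final reference, and only that caller tears the lock down. Below is a minimal sketch of the same last-reference-frees pattern, using C11 atomics and a hypothetical Object type rather than the MXUser API.

#include <stdatomic.h>
#include <stdlib.h>

typedef struct Object {
   atomic_uint refCount;   /* starts at 1 for the creator */
   /* ... payload ... */
} Object;

static void
Object_Release(Object *obj)
{
   /*
    * atomic_fetch_sub returns the previous value; 1 means we held the
    * last reference and are responsible for the cleanup.
    */
   if (atomic_fetch_sub(&obj->refCount, 1) == 1) {
      free(obj);
   }
}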
Example #2
Bool
VThreadBase_IsInSignal(void)
{
   VThreadBaseData *base = VThreadBaseCooked();

   return Atomic_Read(&base->signalNestCount) > 0;
}
Example #3
static void *
fake_getspecific(pthread_key_t key)
{
   ASSERT(key == nothreadTLSKey);
   ASSERT(Atomic_Read(&vthreadBaseGlobals.numThreads) <= 1);
   return nothreadTLSData;
}
Example #4
void
MXUser_IncRefRecLock(MXUserRecLock *lock)  // IN:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   Atomic_Inc(&lock->refCount);
}
Example #5
static int
fake_setspecific(pthread_key_t key,
                 const void *pointer)
{
   ASSERT(key == nothreadTLSKey);
   ASSERT(Atomic_Read(&vthreadBaseGlobals.numThreads) <= 1);
   nothreadTLSData = (void *)pointer;
   return 0;
}
Example #6
static VThreadBaseKeyType
VThreadBaseGetKey(void)
{
   VThreadBaseKeyType key = Atomic_Read(&vthreadBaseGlobals.key);

   if (key == VTHREADBASE_INVALID_KEY) {
      VThreadBaseKeyType newKey;

#if defined _WIN32
      newKey = TlsAlloc();
      ASSERT_NOT_IMPLEMENTED(newKey != VTHREADBASE_INVALID_KEY);
#else
      Bool success = pthread_key_create(&newKey, 
                                        &VThreadBaseSafeDeleteTLS) == 0;
      if (success && newKey == 0) {
         /* 
          * Leak TLS key 0.  System libraries have a habit of destroying
          * it.  See bugs 702818 and 773420.
          */

         success = pthread_key_create(&newKey, 
                                      &VThreadBaseSafeDeleteTLS) == 0;
      }
      ASSERT_NOT_IMPLEMENTED(success);
#endif

      if (Atomic_ReadIfEqualWrite(&vthreadBaseGlobals.key,
                                  VTHREADBASE_INVALID_KEY,
                                  newKey) != VTHREADBASE_INVALID_KEY) {
         /* Race: someone else init'd */
#if defined _WIN32
         TlsFree(newKey);
#else
         pthread_key_delete(newKey);
#endif
      }

      key = Atomic_Read(&vthreadBaseGlobals.key);
      ASSERT(key != VTHREADBASE_INVALID_KEY);
   }

   return key;
}
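VThreadBaseGetKey follows a create-then-publish idiom: allocate a key, try to install it with a single atomic compare-and-exchange, and delete the freshly allocated key if another thread won the race; Atomic_ReadIfEqualWrite returns the previously stored value so the loser can detect this. Here is a minimal standalone sketch of the idiom with C11 atomics, using a hypothetical heap allocation in place of a TLS key.

#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(void *) cachedResource;   /* NULL == not yet created */

static void *
GetResource(void)
{
   void *res = atomic_load(&cachedResource);

   if (res == NULL) {
      void *mine = malloc(64);   /* hypothetical stand-in for TlsAlloc et al. */
      void *expected = NULL;

      if (atomic_compare_exchange_strong(&cachedResource, &expected, mine)) {
         res = mine;             /* we published the resource */
      } else {
         free(mine);             /* raced and lost; discard our copy... */
         res = expected;         /* ...and use the winner's value */
      }
   }

   return res;
}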
Example #7
void
MXUser_ReleaseRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      ASSERT(MXUserMX_UnlockRec);
      (*MXUserMX_UnlockRec)(lock->vmmLock);
   } else {
      if (vmx86_stats) {
         MXUserHeldStats *heldStats = Atomic_ReadPtr(&lock->heldStatsMem);

         if (LIKELY(heldStats != NULL)) {
            if (MXRecLockCount(&lock->recursiveLock) == 1) {
               VmTimeType value;
               MXUserHisto *histo = Atomic_ReadPtr(&heldStats->histo);

               value = Hostinfo_SystemTimerNS() - heldStats->holdStart;

               MXUserBasicStatsSample(&heldStats->data, value);

               if (UNLIKELY(histo != NULL)) {
                  MXUserHistoSample(histo, value, GetReturnAddress());
               }
            }
         }
      }

      if (vmx86_debug) {
         if (MXRecLockCount(&lock->recursiveLock) == 0) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Release of an unacquired recursive lock\n",
                               __FUNCTION__);
         }

         if (!MXRecLockIsOwner(&lock->recursiveLock)) {
            MXUserDumpAndPanic(&lock->header,
                               "%s: Non-owner release of an recursive lock\n",
                               __FUNCTION__);
         }
      }

      MXUserReleaseTracking(&lock->header);

      MXRecLockRelease(&lock->recursiveLock);
   }
}
Example #8
VMCIId
VMCI_GetContextID(void)
{
   if (VMCI_GuestPersonalityActive()) {
      if (Atomic_Read(&vmContextID) == VMCI_INVALID_ID) {
         uint32 result;
         VMCIDatagram getCidMsg;
         getCidMsg.dst =  VMCI_MAKE_HANDLE(VMCI_HYPERVISOR_CONTEXT_ID,
                                           VMCI_GET_CONTEXT_ID);
         getCidMsg.src = VMCI_ANON_SRC_HANDLE;
         getCidMsg.payloadSize = 0;
         result = VMCI_SendDatagram(&getCidMsg);
         Atomic_Write(&vmContextID, result);
      }
      return Atomic_Read(&vmContextID);
   } else if (VMCI_HostPersonalityActive()) {
      return VMCI_HOST_CONTEXT_ID;
   }
   return VMCI_INVALID_ID;
}
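The guest branch above lazily caches the context ID: if the cached value is still VMCI_INVALID_ID, it queries the hypervisor and stores the result. Concurrent callers can race, but they would store the same answer, so no locking is needed. A minimal sketch of this benign-race lazy cache with C11 atomics follows (INVALID_ID and QueryBackend are hypothetical stand-ins for the datagram exchange).

#include <stdatomic.h>
#include <stdint.h>

#define INVALID_ID ((uint32_t)~0u)

static _Atomic uint32_t cachedId = INVALID_ID;

static uint32_t
QueryBackend(void)   /* hypothetical expensive query; always the same answer */
{
   return 42;
}

static uint32_t
GetId(void)
{
   if (atomic_load(&cachedId) == INVALID_ID) {
      /* Benign race: concurrent callers store the same value. */
      atomic_store(&cachedId, QueryBackend());
   }

   return atomic_load(&cachedId);
}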
Example #9
void
VThreadBase_SetIsInSignal(VThreadID tid,    // IN:
                          Bool isInSignal)  // IN:
{
   VThreadBaseData *base = VThreadBaseCooked();

   /* It is an error to clear isInSignal while not in a signal. */
   ASSERT(Atomic_Read(&base->signalNestCount) > 0 || isInSignal);

   Atomic_Add(&base->signalNestCount, isInSignal ? 1 : -1);
}
Example #10
const char *
VThreadBase_CurName(void)
{
   static Atomic_Int curNameRecursion;
   VThreadBaseData *base = VThreadBaseRaw();

   if (LIKELY(base != NULL)) {
      return base->name;
   } else if (Atomic_Read(&curNameRecursion) == 0) {
      /* Unnamed thread, try to name it. */
      Atomic_Inc(&curNameRecursion);
      base = VThreadBaseCooked(); /* Assigns name as side effect */
      Atomic_Dec(&curNameRecursion);

      return base->name;
   } else {
      /*
       * Unnamed thread, but naming it failed (recursed back to here).
       * The heuristic is not perfect (a second unnamed thread could
       * be looking for a name while the first thread names itself),
       * but getting here nonrecursively is unlikely and we cannot
       * do better about detection without thread-local storage, and
       * in the recursive case thread-local storage won't exist after
       * a failure to set up thread-local storage in the first place.
       *
       * This clause should not ASSERT, Panic, or call a Str_
       * function (which can ASSERT or Panic), as the Panic handler is
       * very likely to query the thread name and end up right back here.
       * Thus, use a static buffer for a partly-sane name and hope the
       * Panic handler dumps enough information to figure out what went
       * wrong.
       */

      static char name[48];
      uintptr_t hostTid;

#if defined(_WIN32)
      hostTid = GetCurrentThreadId();
#elif defined(__linux__)
      hostTid = pthread_self();
#elif defined(__APPLE__)
      hostTid = (uintptr_t)(void*)pthread_self();
#else
      hostTid = 0;
#endif
      snprintf(name, sizeof name - 1 /* keep buffer NUL-terminated */,
               "host-%"FMTPD"u", hostTid);

      return name;
   }
}
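The fallback path above uses a static atomic counter as a recursion guard: the expensive naming step is attempted only when the counter is zero, so a Panic handler that re-enters this function drops into the static-buffer branch instead of recursing forever. Below is a minimal sketch of that guard, with hypothetical SlowPath/FallbackPath helpers standing in for VThreadBaseCooked and the snprintf fallback.

#include <stdatomic.h>

static atomic_int recursionGuard;

static const char *
FallbackPath(void)   /* must not call GetName again */
{
   return "unnamed";
}

static const char *
SlowPath(void)       /* may indirectly re-enter GetName */
{
   return "named";
}

static const char *
GetName(void)
{
   if (atomic_load(&recursionGuard) == 0) {
      const char *name;

      atomic_fetch_add(&recursionGuard, 1);
      name = SlowPath();
      atomic_fetch_sub(&recursionGuard, 1);

      return name;
   }

   return FallbackPath();
}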
Example #11
void
MXUser_AcquireRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      ASSERT(MXUserMX_LockRec);
      (*MXUserMX_LockRec)(lock->vmmLock);
   } else {
      /* Rank checking is only done on the first acquisition */
      MXUserAcquisitionTracking(&lock->header, TRUE);

      if (vmx86_stats) {
         VmTimeType value = 0;
         MXUserAcquireStats *acquireStats;

         acquireStats = Atomic_ReadPtr(&lock->acquireStatsMem);

         MXRecLockAcquire(&lock->recursiveLock,
                          (acquireStats == NULL) ? NULL : &value);

         if (LIKELY(acquireStats != NULL)) {
            if (MXRecLockCount(&lock->recursiveLock) == 1) {
               MXUserHeldStats *heldStats;
               MXUserHisto *histo;

               MXUserAcquisitionSample(&acquireStats->data, TRUE,
                            value > acquireStats->data.contentionDurationFloor,
                                       value);

               histo = Atomic_ReadPtr(&acquireStats->histo);

               if (UNLIKELY(histo != NULL)) {
                  MXUserHistoSample(histo, value, GetReturnAddress());
               }

               heldStats = Atomic_ReadPtr(&lock->heldStatsMem);

               if (UNLIKELY(heldStats != NULL)) {
                  heldStats->holdStart = Hostinfo_SystemTimerNS();
               }
            }
         }
      } else {
         MXRecLockAcquire(&lock->recursiveLock,
                          NULL);  // non-stats
      }
   }
}
Example #12
void
MXUserDumpSemaphore(MXUserHeader *header)  // IN:
{
   MXUserSemaphore *sema = (MXUserSemaphore *) header;

   Warning("%s: semaphore @ %p\n", __FUNCTION__, sema);

   Warning("\tsignature 0x%X\n", sema->header.signature);
   Warning("\tname %s\n", sema->header.name);
   Warning("\trank 0x%X\n", sema->header.rank);
   Warning("\tserial number %u\n", sema->header.serialNumber);

   Warning("\treference count %u\n", Atomic_Read(&sema->activeUserCount));
   Warning("\taddress of native semaphore %p\n", &sema->nativeSemaphore);
}
Example #13
static INLINE VThreadBaseData *
VThreadBaseRaw(void)
{
   VThreadBaseKeyType key = Atomic_Read(&vthreadBaseGlobals.key);

   if (UNLIKELY(key == VTHREADBASE_INVALID_KEY)) {
      key = VThreadBaseGetKey(); /* Non-inlined slow path */
   }

#if defined _WIN32
   return (VThreadBaseData *) TlsGetValue(key);
#else
   return (VThreadBaseData *) pthread_getspecific(key);
#endif
}
Example #14
void
MXUser_DestroyRWLock(MXUserRWLock *lock)  // IN:
{
   if (LIKELY(lock != NULL)) {
      MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

      if (Atomic_Read(&lock->holderCount) != 0) {
         MXUserDumpAndPanic(&lock->header,
                            "%s: Destroy on an acquired read-write lock\n",
                            __FUNCTION__);
      }

      if (LIKELY(lock->useNative)) {
         int err = MXUserNativeRWDestroy(&lock->nativeLock);

         if (UNLIKELY(err != 0)) {
            MXUserDumpAndPanic(&lock->header, "%s: Internal error (%d)\n",
                               __FUNCTION__, err);
         }
      }

      lock->header.signature = 0;  // just in case...

      MXRecLockDestroy(&lock->recursiveLock);

      MXUserRemoveFromList(&lock->header);

      if (vmx86_stats) {
         MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

         if (LIKELY(stats != NULL)) {
            MXUserAcquisitionStatsTearDown(&stats->acquisitionStats);
            MXUserHistoTearDown(Atomic_ReadPtr(&stats->acquisitionHisto));

            MXUserBasicStatsTearDown(&stats->heldStats);
            MXUserHistoTearDown(Atomic_ReadPtr(&stats->heldHisto));

            free(stats);
         }
      }

      HashTable_FreeUnsafe(lock->holderTable);
      free(lock->header.name);
      lock->header.name = NULL;
      free(lock);
   }
}
Example #15
Bool
MXUser_IsCurThreadHoldingRecLock(MXUserRecLock *lock)  // IN:
{
   Bool result;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      ASSERT(MXUserMX_IsLockedByCurThreadRec);
      result = (*MXUserMX_IsLockedByCurThreadRec)(lock->vmmLock);
   } else {
      result = MXRecLockIsOwner(&lock->recursiveLock);
   }

   return result;
}
Example #16
void
MXUser_DestroySemaphore(MXUserSemaphore *sema)  // IN:
{
   if (LIKELY(sema != NULL)) {
      int err;

      MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

      if (Atomic_Read(&sema->activeUserCount) != 0) {
         MXUserDumpAndPanic(&sema->header,
                            "%s: Attempted destroy on semaphore while in use\n",
                            __FUNCTION__);
      }

      sema->header.signature = 0;  // just in case...

      err = MXUserDestroy(&sema->nativeSemaphore);

      if (UNLIKELY(err != 0)) {
         MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                            __FUNCTION__, err);
      }

      MXUserRemoveFromList(&sema->header);

      if (vmx86_stats) {
         MXUserAcquireStats *acquireStats;

         acquireStats = Atomic_ReadPtr(&sema->acquireStatsMem);

         if (LIKELY(acquireStats != NULL)) {
            MXUserAcquisitionStatsTearDown(&acquireStats->data);
            MXUserHistoTearDown(Atomic_ReadPtr(&acquireStats->histo));

            free(acquireStats);
         }
      }

      free(sema->header.name);
      sema->header.name = NULL;
      free(sema);
   }
}
Example #17
static void
VMCIUtilCidUpdate(VMCIId subID,               // IN:
                  VMCI_EventData *eventData,  // IN:
                  void *clientData)           // IN:
{
   VMCIEventPayload_Context *evPayload = VMCIEventDataPayload(eventData);

   if (subID != ctxUpdateSubID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Invalid subscriber (ID=0x%x).\n", subID));
      return;
   }
   if (eventData == NULL || evPayload->contextID == VMCI_INVALID_ID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Invalid event data.\n"));
      return;
   }
   VMCI_LOG((LGPFX"Updating context from (ID=0x%x) to (ID=0x%x) on event "
             "(type=%d).\n", Atomic_Read(&vmContextID), evPayload->contextID,
             eventData->event));
   Atomic_Write(&vmContextID, evPayload->contextID);
}
Example #18
void
VThreadBase_SetNoIDFunc(void (*hookFunc)(void),       // IN: new hook function
                        void (*destroyFunc)(void *))  // IN/OPT: new TLS destructor
{
   ASSERT(hookFunc);

   /*
    * The hook function can only be set once, and must be set before
    * any VThreadIDs are allocated so that the hook can control the VThreadID
    * namespace.
    *
    * If the process has had only a single thread, that thread can be forgotten
    * via VThreadBase_ForgetSelf() and this function safely called.
    */
   ASSERT(vthreadBaseGlobals.noIDFunc == VThreadBaseSimpleNoID &&
          Atomic_Read(&vthreadBaseGlobals.numThreads) == 0);

   vthreadBaseGlobals.noIDFunc = hookFunc;
   vthreadBaseGlobals.freeIDFunc = destroyFunc;
}
Example #19
void
MXUserDumpRWLock(MXUserHeader *header)  // IN:
{
   MXUserRWLock *lock = (MXUserRWLock *) header;

   Warning("%s: Read-write lock @ 0x%p\n", __FUNCTION__, lock);

   Warning("\tsignature 0x%X\n", lock->header.signature);
   Warning("\tname %s\n", lock->header.name);
   Warning("\trank 0x%X\n", lock->header.rank);
   Warning("\tserial number %u\n", lock->header.serialNumber);

   if (LIKELY(lock->useNative)) {
      Warning("\taddress of native lock 0x%p\n", &lock->nativeLock);
   } else {
      Warning("\tcount %d\n", MXRecLockCount(&lock->recursiveLock));
   }

   Warning("\tholderCount %d\n", Atomic_Read(&lock->holderCount));
}
Example #20
void
MXUser_DestroyCondVar(MXUserCondVar *condVar)  // IN:
{
   if (condVar != NULL) {
      ASSERT(condVar->signature == MXUserGetSignature(MXUSER_TYPE_CONDVAR));

      if (Atomic_Read(&condVar->referenceCount) != 0) {
         Panic("%s: Attempted destroy on active condVar (0x%p; %s)\n",
               __FUNCTION__, condVar, condVar->header->name);
      }

      condVar->signature = 0;  // just in case...

      MXUserDestroyInternal(condVar);

      condVar->header = NULL;
      condVar->ownerLock = NULL;

      free(condVar);
   }
}
Example #21
static void
MXUserDumpRecLock(MXUserHeader *header)  // IN:
{
   MXUserRecLock *lock = (MXUserRecLock *) header;

   Warning("%s: Recursive lock @ %p\n", __FUNCTION__, lock);

   Warning("\tsignature 0x%X\n", lock->header.signature);
   Warning("\tname %s\n", lock->header.name);
   Warning("\trank 0x%X\n", lock->header.rank);
   Warning("\tserial number %u\n", lock->header.bits.serialNumber);
   Warning("\treference count %u\n", Atomic_Read(&lock->refCount));

   if (lock->vmmLock == NULL) {
      Warning("\tlock count %d\n", MXRecLockCount(&lock->recursiveLock));

      Warning("\taddress of owner data %p\n",
              &lock->recursiveLock.nativeThreadID);
   } else {
      Warning("\tvmmLock %p\n", lock->vmmLock);
   }
}
Example #22
Bool
MXUser_TryAcquireRecLock(MXUserRecLock *lock)  // IN/OUT:
{
   Bool success;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_REC);
   ASSERT(Atomic_Read(&lock->refCount) > 0);

   if (UNLIKELY(lock->vmmLock != NULL)) {
      ASSERT(MXUserMX_TryLockRec);
      success = (*MXUserMX_TryLockRec)(lock->vmmLock);
   } else {
      if (MXUserTryAcquireFail(lock->header.name)) {
         success = FALSE;
         goto bail;
      }

      success = MXRecLockTryAcquire(&lock->recursiveLock);

      if (success) {
         MXUserAcquisitionTracking(&lock->header, FALSE);
      }

      if (vmx86_stats) {
         MXUserAcquireStats *acquireStats;

         acquireStats = Atomic_ReadPtr(&lock->acquireStatsMem);

         if (LIKELY(acquireStats != NULL)) {
            MXUserAcquisitionSample(&acquireStats->data, success, !success,
                                    0ULL);
         }
      }
   }

bail:
   return success;
}
Example #23
static void
VThreadBaseSimpleNoID(void)
{
   VThreadID newID;
   Bool reused = FALSE;
   Bool result;
   void *newNative = VThreadBaseGetNative();
   HashTable *ht = VThreadBaseGetNativeHash();
   VThreadBaseData *base;

   /* Require key allocation before TLS read */
   VThreadBaseGetKey();

   /* Before allocating a new ID, try to reclaim any old IDs. */
   for (newID = 0;
        newID < Atomic_Read(&vthreadBaseGlobals.dynamicID);
        newID++) {
      void *newKey = (void *)(uintptr_t)newID;

      /*
       * Windows: any entry that is found and not (alive or NULL)
       *    is reclaimable.  The check is slightly racy, but the race
       *    would only cause missing a reclaim which isn't a problem.
       * Posix: thread exit is hooked (via TLS destructor) and sets
       *    entries to NULL, so any entry that is NULL is reclaimable.
       */
#ifdef _WIN32
      void *oldNative;
      reused = HashTable_Lookup(ht, newKey, &oldNative) &&
               (oldNative == NULL ||
                !VThreadBaseNativeIsAlive(oldNative)) &&
               HashTable_ReplaceIfEqual(ht, newKey, oldNative, newNative);
#else
      reused = HashTable_ReplaceIfEqual(ht, newKey, NULL, newNative);
#endif
      if (reused) {
         break;
      }
   }

   if (!reused) {
      void *newKey;

      newID = Atomic_FetchAndInc(&vthreadBaseGlobals.dynamicID);
      /*
       * Detect VThreadID overflow (~0 is used as a sentinel).
       * Leave a space of ~10 IDs, since the increment and bounds-check
       * are not atomic.
       */
      ASSERT_NOT_IMPLEMENTED(newID < VTHREAD_INVALID_ID - 10);

      newKey = (void *)(uintptr_t)newID;
      result = HashTable_Insert(ht, newKey, newNative);
      ASSERT_NOT_IMPLEMENTED(result);
   }

   /* ID picked.  Now do the important stuff. */
   base = Util_SafeCalloc(1, sizeof *base);
   base->id = newID;
   Str_Sprintf(base->name, sizeof base->name, "vthread-%u", newID);

   result = VThreadBase_InitWithTLS(base);
   ASSERT(result);

   if (vmx86_debug && reused) {
      Log("VThreadBase reused VThreadID %d.\n", newID);
   }

   if (Atomic_Read(&vthreadBaseGlobals.numThreads) > 1) {
      LOG_ONCE(("VThreadBase detected multiple threads.\n"));
   }
}
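VThreadBaseSimpleNoID first scans for an ID whose previous owner has exited and claims it atomically, and only mints a brand-new ID when nothing is reclaimable. A minimal sketch of that reclaim-or-allocate pattern over a fixed-size slot table follows (the table, its size, and the ClaimSlot/ReleaseSlot names are hypothetical; the real code uses a hash table keyed by VThreadID and platform-specific liveness checks).

#include <assert.h>
#include <stdatomic.h>

#define MAX_SLOTS 64

static _Atomic(void *) slots[MAX_SLOTS];   /* NULL == reclaimable */
static atomic_uint nextSlot;               /* first never-used slot */

static unsigned
ClaimSlot(void *owner)
{
   unsigned id;
   unsigned limit = atomic_load(&nextSlot);

   /* Prefer reclaiming an ID whose previous owner released it (slot NULL). */
   for (id = 0; id < limit; id++) {
      void *expected = NULL;

      if (atomic_compare_exchange_strong(&slots[id], &expected, owner)) {
         return id;   /* reused an old ID */
      }
   }

   /*
    * Nothing reclaimable; mint fresh IDs until we win one. A concurrent
    * reclaimer may grab a just-minted slot first, in which case we simply
    * skip it, wasting an ID (akin to the ~10-ID slack noted above).
    */
   for (;;) {
      void *expected = NULL;

      id = atomic_fetch_add(&nextSlot, 1);
      assert(id < MAX_SLOTS);

      if (atomic_compare_exchange_strong(&slots[id], &expected, owner)) {
         return id;
      }
   }
}

static void
ReleaseSlot(unsigned id)
{
   atomic_store(&slots[id], NULL);   /* mark reclaimable, like the TLS destructor */
}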
Example #24
void
MXUser_ReleaseRWLock(MXUserRWLock *lock)  // IN/OUT:
{
   HolderContext *myContext;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

   myContext = MXUserGetHolderContext(lock);

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;
         VmTimeType duration = Hostinfo_SystemTimerNS() - myContext->holdStart;

         /*
          * The statistics are not always atomically safe so protect them
          * when necessary
          */

         if ((myContext->state == RW_LOCKED_FOR_READ) && lock->useNative) {
            MXRecLockAcquire(&lock->recursiveLock,
                             NULL);  // non-stats
         }

         MXUserBasicStatsSample(&stats->heldStats, duration);

         histo = Atomic_ReadPtr(&stats->heldHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, duration, GetReturnAddress());
         }

         if ((myContext->state == RW_LOCKED_FOR_READ) && lock->useNative) {
            MXRecLockRelease(&lock->recursiveLock);
         }
      }
   }

   if (UNLIKELY(myContext->state == RW_UNLOCKED)) {
      uint32 lockCount = Atomic_Read(&lock->holderCount);

      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s read-write lock\n",
                         __FUNCTION__,
                         lockCount == 0 ? "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   Atomic_Dec(&lock->holderCount);

   if (LIKELY(lock->useNative)) {
      int err = MXUserNativeRWRelease(&lock->nativeLock,
                                      myContext->state == RW_LOCKED_FOR_READ);

      if (UNLIKELY(err != 0)) {
         MXUserDumpAndPanic(&lock->header, "%s: Internal error (%d)\n",
                            __FUNCTION__, err);
      }
   } else {
      ASSERT(Atomic_Read(&lock->holderCount) == 0);
      MXRecLockRelease(&lock->recursiveLock);
   }

   myContext->state = RW_UNLOCKED;
}
Example #25
static INLINE void
MXUserAcquisition(MXUserRWLock *lock,  // IN/OUT:
                  Bool forRead)        // IN:
{
   HolderContext *myContext;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

   MXUserAcquisitionTracking(&lock->header, TRUE);

   myContext = MXUserGetHolderContext(lock);

   if (UNLIKELY(myContext->state != RW_UNLOCKED)) {
      MXUserDumpAndPanic(&lock->header,
                         "%s: AcquireFor%s after AcquireFor%s\n",
                         __FUNCTION__,
                         forRead ? "Read" : "Write",
                         (myContext->state == RW_LOCKED_FOR_READ) ? "Read" :
                                                                    "Write");
   }

   if (vmx86_stats) {
      VmTimeType value;
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (lock->useNative) {
         int err = 0;
         Bool contended;
         VmTimeType begin = Hostinfo_SystemTimerNS();

         contended = MXUserNativeRWAcquire(&lock->nativeLock, forRead, &err);

         value = contended ? Hostinfo_SystemTimerNS() - begin : 0;

         if (UNLIKELY(err != 0)) {
            MXUserDumpAndPanic(&lock->header, "%s: Error %d: contended %d\n",
                               __FUNCTION__, err, contended);
         }
      } else {
         value = 0;

         MXRecLockAcquire(&lock->recursiveLock,
                          (stats == NULL) ? NULL : &value);
      }

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;

         /*
          * The statistics are not atomically safe so protect them when
          * necessary.
          */

         if (forRead && lock->useNative) {
            MXRecLockAcquire(&lock->recursiveLock,
                             NULL);  // non-stats
         }

         MXUserAcquisitionSample(&stats->acquisitionStats, TRUE, value != 0,
                                 value);

         histo = Atomic_ReadPtr(&stats->acquisitionHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, value, GetReturnAddress());
         }

         if (forRead && lock->useNative) {
            MXRecLockRelease(&lock->recursiveLock);
         }

         myContext->holdStart = Hostinfo_SystemTimerNS();
      }
   } else {
      if (LIKELY(lock->useNative)) {
         int err = 0;

         MXUserNativeRWAcquire(&lock->nativeLock, forRead, &err);

         if (UNLIKELY(err != 0)) {
            MXUserDumpAndPanic(&lock->header, "%s: Error %d\n",
                               __FUNCTION__, err);
         }
      } else {
         MXRecLockAcquire(&lock->recursiveLock,
                          NULL);  // non-stats
      }
   }

   if (!forRead || !lock->useNative) {
      ASSERT(Atomic_Read(&lock->holderCount) == 0);
   }

   Atomic_Inc(&lock->holderCount);
   myContext->state = forRead ? RW_LOCKED_FOR_READ : RW_LOCKED_FOR_WRITE;
}