void ReadWriteLock_ReleaseRead(
    _Inout_ ReadWriteLock* self)
{
    volatile LockFields* lock = (LockFields*)self;
    size_t state, key;

    state = Atomic_Dec(LockState(lock));

    if (state >= OWN_EXCLUSIVE && LockOwners(state) == 0) {
        /* There is a queue.
         * Writers may be blocked waiting for us to leave. */
        key = (size_t)lock ^ LockExit(state);
        CondLock_Broadcast(key);

        if (LockEntry(state) - LockExit(state) >= 2 &&
            ((CurrentTick() - LockUnfair(state)) & 14) == 0) {
            /* Under certain conditions, encourage the last group of threads
             * in line to stop spinning and acquire unfairly. */
            if (LockEntry(state) == LockWriter(state))
                key = (size_t)lock ^ (LockEntry(state) - 1);
            else
                key = (size_t)lock ^ LockWriter(state);

            CondLock_BroadcastSpinners(key);
        }
    }
}
Bool
MXUser_TryDownSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   int err;
   Bool downOccurred = FALSE;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   err = MXUserTryDown(&sema->nativeSemaphore, &downOccurred);

   if (UNLIKELY(err != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, err);
   }

   if (vmx86_stats) {
      MXUserAcquireStats *acquireStats;

      acquireStats = Atomic_ReadPtr(&sema->acquireStatsMem);

      if (LIKELY(acquireStats != NULL)) {
         MXUserAcquisitionSample(&acquireStats->data, downOccurred,
                                 !downOccurred, 0ULL);
      }
   }

   Atomic_Dec(&sema->activeUserCount);

   return downOccurred;
}
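/*
 * Usage sketch (illustrative only): a caller that polls the semaphore and
 * falls back to other work when it is unavailable. The ProcessItem() and
 * DoOtherWork() helpers are hypothetical, not part of the MXUser API.
 */

static void
PollWorker(MXUserSemaphore *sema)  // IN/OUT:
{
   if (MXUser_TryDownSemaphore(sema)) {
      ProcessItem();   /* Acquired without blocking. */
   } else {
      DoOtherWork();   /* Semaphore unavailable; do not block. */
   }
}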
void
MXUserWaitCondVar(MXUserHeader *header,    // IN:
                  MXRecLock *lock,         // IN:
                  MXUserCondVar *condVar,  // IN:
                  uint32 msecWait)         // IN:
{
   ASSERT(header);
   ASSERT(lock);
   ASSERT(condVar);
   ASSERT(condVar->signature == MXUserGetSignature(MXUSER_TYPE_CONDVAR));

   if (condVar->ownerLock != lock) {
      Panic("%s: invalid use of lock %s with condVar (0x%p; %s)\n",
            __FUNCTION__, header->name, condVar, condVar->header->name);
   }

   if (vmx86_debug && !MXRecLockIsOwner(lock)) {
      Panic("%s: lock %s for condVar (0x%p) not owned\n",
            __FUNCTION__, condVar->header->name, condVar);
   }

   Atomic_Inc(&condVar->referenceCount);
   MXUserWaitInternal(lock, condVar, msecWait);
   Atomic_Dec(&condVar->referenceCount);
}
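/*
 * Waiter sketch (illustrative only): condition variables are waited on in a
 * re-test loop because wakeups may be spurious. The 'ready' flag is
 * hypothetical, and MXUSER_WAIT_INFINITE is assumed to be the usual
 * "no timeout" constant; neither is taken from the code above.
 */

static void
WaitUntilReady(MXUserHeader *header,  // IN:
               MXRecLock *lock,       // IN:
               MXUserCondVar *cv,     // IN:
               volatile Bool *ready)  // IN:
{
   /* Caller must hold 'lock'; MXUserWaitCondVar checks this in debug builds. */
   while (!*ready) {   /* Re-test the predicate after every wakeup. */
      MXUserWaitCondVar(header, lock, cv, MXUSER_WAIT_INFINITE);
   }
}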
static int TryAcquireRead(
    _Inout_ CachedLock* self,
    _Out_ void** token)
{
    volatile ptrdiff_t* master = &self->master;
    ptrdiff_t* latch;
    int cpu;

    cpu = CPU_GetCurrent() & s_cpuMask;
    latch = self->latches + LATCHES_PER_LINE * cpu;

    if (*master != 0) {
        /* Exclusive threads are already here. Short-circuit. */
        *token = NULL;
        return 0;
    }

    Atomic_Inc(latch);

    if (*master != 0) {
        /* Exclusive threads might have missed our increment. */
        if (Atomic_Dec(latch) == 0)
            ClearCPU(self, latch);

        *token = NULL;
        return 0;
    }

    /* Exclusive threads cannot miss that we are here. */
    *token = latch;
    return 1;
}
void
MXUser_DownSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   int err;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   MXUserAcquisitionTracking(&sema->header, TRUE);  // rank checking

   if (vmx86_stats) {
      VmTimeType start = 0;
      Bool tryDownSuccess = FALSE;
      MXUserAcquireStats *acquireStats;

      acquireStats = Atomic_ReadPtr(&sema->acquireStatsMem);

      if (LIKELY(acquireStats != NULL)) {
         start = Hostinfo_SystemTimerNS();
      }

      err = MXUserTryDown(&sema->nativeSemaphore, &tryDownSuccess);

      if (LIKELY(err == 0)) {
         if (!tryDownSuccess) {
            err = MXUserDown(&sema->nativeSemaphore);
         }
      }

      if (LIKELY((err == 0) && (acquireStats != NULL))) {
         MXUserHisto *histo;
         VmTimeType value = Hostinfo_SystemTimerNS() - start;

         MXUserAcquisitionSample(&acquireStats->data, TRUE,
                                 !tryDownSuccess, value);

         histo = Atomic_ReadPtr(&acquireStats->histo);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, value, GetReturnAddress());
         }
      }
   } else {
      err = MXUserDown(&sema->nativeSemaphore);
   }

   if (UNLIKELY(err != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, err);
   }

   MXUserReleaseTracking(&sema->header);

   Atomic_Dec(&sema->activeUserCount);
}
const char *
VThreadBase_CurName(void)
{
   static Atomic_Int curNameRecursion;
   VThreadBaseData *base = VThreadBaseRaw();

   if (LIKELY(base != NULL)) {
      return base->name;
   } else if (Atomic_Read(&curNameRecursion) == 0) {
      /* Unnamed thread, try to name it. */
      Atomic_Inc(&curNameRecursion);
      base = VThreadBaseCooked();  /* Assigns name as side effect */
      Atomic_Dec(&curNameRecursion);

      return base->name;
   } else {
      /*
       * Unnamed thread, but naming it failed (recursed back to here).
       * The heuristic is not perfect (a second unnamed thread could
       * be looking for a name while the first thread names itself),
       * but getting here nonrecursively is unlikely and we cannot
       * do better about detection without thread-local storage, and
       * in the recursive case thread-local storage won't exist after
       * a failure to set up thread-local storage in the first place.
       *
       * This clause should not ASSERT, Panic or call a Str_
       * function (that can ASSERT or Panic), as the Panic handler is
       * very likely to query the thread name and end up right back here.
       * Thus, use a static buffer for a partly-sane name and hope the
       * Panic handler dumps enough information to figure out what went
       * wrong.
       */
      static char name[48];
      uintptr_t hostTid;

#if defined(_WIN32)
      hostTid = GetCurrentThreadId();
#elif defined(__linux__)
      hostTid = pthread_self();
#elif defined(__APPLE__)
      hostTid = (uintptr_t)(void*)pthread_self();
#else
      hostTid = 0;
#endif
      snprintf(name, sizeof name - 1 /* keep buffer NUL-terminated */,
               "host-%"FMTPD"u", hostTid);

      return name;
   }
}
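/*
 * The recursion guard above generalizes: an atomic counter detects re-entry
 * into a path that can be reached again from its own failure handling. A
 * minimal sketch of the same pattern, assuming the Atomic_* primitives used
 * throughout this file; EmitDiagnostic() is hypothetical.
 */

static void
EmitDiagnosticOnce(void)
{
   static Atomic_Int recursion;

   if (Atomic_Read(&recursion) == 0) {
      Atomic_Inc(&recursion);
      EmitDiagnostic();       /* May itself fail and re-enter this function. */
      Atomic_Dec(&recursion);
   }
   /* Recursive entry: fall through without re-emitting. */
}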
static void
VMCIDatagramDelayedDispatchCB(void *data)  // IN
{
   Bool inDGHostQueue;
   VMCIDelayedDatagramInfo *dgInfo = (VMCIDelayedDatagramInfo *)data;

   ASSERT(data);

   dgInfo->entry->recvCB(dgInfo->entry->clientData, &dgInfo->msg);

   VMCIResource_Release(&dgInfo->entry->resource);

   inDGHostQueue = dgInfo->inDGHostQueue;
   VMCI_FreeKernelMem(dgInfo, sizeof *dgInfo + (size_t)dgInfo->msg.payloadSize);

   if (inDGHostQueue) {
      Atomic_Dec(&delayedDGHostQueueSize);
   }
}
void
MXUser_UpSemaphore(MXUserSemaphore *sema)  // IN/OUT:
{
   int err;

   ASSERT(sema);
   MXUserValidateHeader(&sema->header, MXUSER_TYPE_SEMA);

   Atomic_Inc(&sema->activeUserCount);

   err = MXUserUp(&sema->nativeSemaphore);

   if (UNLIKELY(err != 0)) {
      MXUserDumpAndPanic(&sema->header, "%s: Internal error (%d)\n",
                         __FUNCTION__, err);
   }

   Atomic_Dec(&sema->activeUserCount);
}
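/*
 * Pairing sketch (illustrative only): a blocking consumer and a producer
 * built from MXUser_DownSemaphore()/MXUser_UpSemaphore() above. The queue
 * and its Dequeue()/Enqueue() helpers are hypothetical.
 */

static void
Consumer(MXUserSemaphore *sema)  // IN/OUT:
{
   MXUser_DownSemaphore(sema);   /* Blocks until a producer posts. */
   Dequeue();
}

static void
Producer(MXUserSemaphore *sema)  // IN/OUT:
{
   Enqueue();
   MXUser_UpSemaphore(sema);     /* Wakes one waiting consumer. */
}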
void CachedLock_ReleaseRead(
    _Inout_ CachedLock* self,
    _In_ void* token)
{
    ptrdiff_t* latch = (ptrdiff_t*)token;
    ptrdiff_t state;

    if (latch == NULL) {
        /* This thread acquired the central lock, not a latch. */
        ReadWriteLock_ReleaseRead(&self->lock);
        return;
    }

    /* Decrement the same latch that got incremented before. */
    state = Atomic_Dec(latch);

    if (state == 0 && self->master != 0)
        ClearCPU(self, latch);
}
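/*
 * Read-side pairing sketch (illustrative only): the token produced by
 * TryAcquireRead() must be handed back to CachedLock_ReleaseRead() so the
 * same per-CPU latch is decremented; a NULL token tells the release path
 * that the central lock was taken instead. ReadWriteLock_AcquireRead() is
 * the assumed counterpart of ReadWriteLock_ReleaseRead() above, and
 * ReadSharedState() is hypothetical.
 */

static void
ReaderSection(CachedLock* self)
{
    void* token;

    if (!TryAcquireRead(self, &token)) {
        /* Fall back to the central lock; TryAcquireRead left token NULL,
         * so CachedLock_ReleaseRead will release the central lock. */
        ReadWriteLock_AcquireRead(&self->lock);
    }

    ReadSharedState();
    CachedLock_ReleaseRead(self, token);
}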
static void
VThreadBaseSafeDeleteTLS(void *tlsData)
{
   VThreadBaseData *data = tlsData;

   if (data != NULL) {
      if (vthreadBaseGlobals.freeIDFunc != NULL) {
         VThreadBaseKeyType key = VThreadBaseGetKey();
         Bool success;
         VThreadBaseData tmpData = *data;

         /*
          * Cleanup routines (specifically, Log()) need to be called with
          * valid TLS, so switch to a stack-based TLS slot containing just
          * enough for the VThreadBase services, clean up, then clear the
          * TLS slot.
          */
#if defined _WIN32
         success = TlsSetValue(key, &tmpData);
#else
         success = pthread_setspecific(key, &tmpData) == 0;
#endif
         ASSERT_NOT_IMPLEMENTED(success);

         if (vmx86_debug) {
            Log("Forgetting VThreadID %d (\"%s\").\n", data->id, data->name);
         }
         (*vthreadBaseGlobals.freeIDFunc)(data);

#if defined _WIN32
         success = TlsSetValue(key, NULL);
#else
         success = pthread_setspecific(key, NULL) == 0;
#endif
         ASSERT_NOT_IMPLEMENTED(success);
      }

      Atomic_Dec(&vthreadBaseGlobals.numThreads);
   }
}
static int
VMCIDatagramDispatchAsHost(VMCIId contextID,  // IN:
                           VMCIDatagram *dg)  // IN:
{
   int retval;
   size_t dgSize;
   VMCIPrivilegeFlags srcPrivFlags;

   ASSERT(dg);
   ASSERT(VMCI_HostPersonalityActive());

   dgSize = VMCI_DG_SIZE(dg);

   if (contextID == VMCI_HOST_CONTEXT_ID &&
       dg->dst.context == VMCI_HYPERVISOR_CONTEXT_ID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Host cannot talk to hypervisor\n"));
      return VMCI_ERROR_DST_UNREACHABLE;
   }

   ASSERT(dg->dst.context != VMCI_HYPERVISOR_CONTEXT_ID);

   /* Chatty. */
   // VMCI_DEBUG_LOG(10, (LGPFX"Sending from (handle=0x%x:0x%x) to "
   //                     "(handle=0x%x:0x%x) (size=%u bytes).\n",
   //                     dg->src.context, dg->src.resource,
   //                     dg->dst.context, dg->dst.resource, (uint32)dgSize));

   /*
    * Check that source handle matches sending context.
    */
   if (dg->src.context != contextID) {
      VMCI_DEBUG_LOG(4, (LGPFX"Sender context (ID=0x%x) is not owner of src "
                         "datagram entry (handle=0x%x:0x%x).\n",
                         contextID, dg->src.context, dg->src.resource));
      return VMCI_ERROR_NO_ACCESS;
   }

   /*
    * Get hold of privileges of sending endpoint.
    */
   retval = VMCIDatagramGetPrivFlagsInt(contextID, dg->src, &srcPrivFlags);
   if (retval != VMCI_SUCCESS) {
      VMCI_WARNING((LGPFX"Couldn't get privileges (handle=0x%x:0x%x).\n",
                    dg->src.context, dg->src.resource));
      return retval;
   }

   /* Determine if we should route to host or guest destination. */
   if (dg->dst.context == VMCI_HOST_CONTEXT_ID) {
      /* Route to host datagram entry. */
      DatagramEntry *dstEntry;
      VMCIResource *resource;

      if (dg->src.context == VMCI_HYPERVISOR_CONTEXT_ID &&
          dg->dst.resource == VMCI_EVENT_HANDLER) {
         return VMCIEvent_Dispatch(dg);
      }

      resource = VMCIResource_Get(dg->dst, VMCI_RESOURCE_TYPE_DATAGRAM);
      if (resource == NULL) {
         VMCI_DEBUG_LOG(4, (LGPFX"Sending to invalid destination "
                            "(handle=0x%x:0x%x).\n",
                            dg->dst.context, dg->dst.resource));
         return VMCI_ERROR_INVALID_RESOURCE;
      }
      dstEntry = RESOURCE_CONTAINER(resource, DatagramEntry, resource);
      if (VMCIDenyInteraction(srcPrivFlags, dstEntry->privFlags)) {
         VMCIResource_Release(resource);
         return VMCI_ERROR_NO_ACCESS;
      }
      ASSERT(dstEntry->recvCB);

      /*
       * If a VMCI datagram destined for the host is also sent by the
       * host, we always run it delayed. This ensures that no locks
       * are held when the datagram callback runs.
       */
      if (dstEntry->runDelayed ||
          (dg->src.context == VMCI_HOST_CONTEXT_ID &&
           VMCI_CanScheduleDelayedWork())) {
         VMCIDelayedDatagramInfo *dgInfo;

         if (Atomic_FetchAndAdd(&delayedDGHostQueueSize, 1) ==
             VMCI_MAX_DELAYED_DG_HOST_QUEUE_SIZE) {
            Atomic_Dec(&delayedDGHostQueueSize);
            VMCIResource_Release(resource);
            return VMCI_ERROR_NO_MEM;
         }

         dgInfo = VMCI_AllocKernelMem(sizeof *dgInfo + (size_t)dg->payloadSize,
                                      (VMCI_MEMORY_ATOMIC |
                                       VMCI_MEMORY_NONPAGED));
         if (NULL == dgInfo) {
            Atomic_Dec(&delayedDGHostQueueSize);
            VMCIResource_Release(resource);
            return VMCI_ERROR_NO_MEM;
         }

         dgInfo->inDGHostQueue = TRUE;
         dgInfo->entry = dstEntry;
         memcpy(&dgInfo->msg, dg, dgSize);

         retval = VMCI_ScheduleDelayedWork(VMCIDatagramDelayedDispatchCB,
                                           dgInfo);
         if (retval < VMCI_SUCCESS) {
            VMCI_WARNING((LGPFX"Failed to schedule delayed work for datagram "
                          "(result=%d).\n", retval));
            VMCI_FreeKernelMem(dgInfo,
                               sizeof *dgInfo + (size_t)dg->payloadSize);
            VMCIResource_Release(resource);
            Atomic_Dec(&delayedDGHostQueueSize);
            return retval;
         }
      } else {
         retval = dstEntry->recvCB(dstEntry->clientData, dg);
         VMCIResource_Release(resource);
         if (retval < VMCI_SUCCESS) {
            return retval;
         }
      }
   } else {
      /*
       * Route to destination VM context.
       */
      VMCIDatagram *newDG;

      if (contextID != dg->dst.context) {
         if (VMCIDenyInteraction(srcPrivFlags,
                                 vmci_context_get_priv_flags(dg->dst.context))) {
            VMCI_DEBUG_LOG(4, (LGPFX"Interaction denied (%X/%X - %X/%X)\n",
                               contextID, srcPrivFlags, dg->dst.context,
                               vmci_context_get_priv_flags(dg->dst.context)));
            return VMCI_ERROR_NO_ACCESS;
         } else if (VMCI_CONTEXT_IS_VM(contextID)) {
            /*
             * If the sending context is a VM, it cannot reach another VM.
             */
            if (!vmkernel) {
               VMCI_DEBUG_LOG(4, (LGPFX"Datagram communication between VMs not "
                                  "supported (src=0x%x, dst=0x%x).\n",
                                  contextID, dg->dst.context));
               return VMCI_ERROR_DST_UNREACHABLE;
            }
         }
      }

      /* We make a copy to enqueue. */
      newDG = VMCI_AllocKernelMem(dgSize, VMCI_MEMORY_NORMAL);
      if (newDG == NULL) {
         VMCI_DEBUG_LOG(4, (LGPFX"No memory for datagram\n"));
         return VMCI_ERROR_NO_MEM;
      }
      memcpy(newDG, dg, dgSize);
      retval = VMCIContext_EnqueueDatagram(dg->dst.context, newDG);
      if (retval < VMCI_SUCCESS) {
         VMCI_FreeKernelMem(newDG, dgSize);
         VMCI_DEBUG_LOG(4, (LGPFX"Enqueue failed\n"));
         return retval;
      }
   }

   /* The datagram is freed when the context reads it. */

   /* Chatty. */
   // VMCI_DEBUG_LOG(10, (LGPFX"Sent datagram (size=%u bytes).\n",
   //                     (uint32)dgSize));

   /*
    * We currently truncate the size to signed 32 bits. This doesn't
    * matter for this handler as it only supports 4KB messages.
    */
   return (int)dgSize;
}
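/*
 * The delayed-dispatch path above uses a common bounded-admission pattern:
 * optimistically Atomic_FetchAndAdd() a queue counter, then roll back with
 * Atomic_Dec() if the bound was hit or a later step fails. A minimal sketch
 * using the same Atomic_* calls; queueSize, its Atomic_uint32 type,
 * kQueueLimit and ScheduleWork() are assumptions, not taken from the code
 * above.
 */

static Atomic_uint32 queueSize;   /* Assumed counter type. */

static int
TryAdmit(void)
{
   if (Atomic_FetchAndAdd(&queueSize, 1) >= kQueueLimit) {
      Atomic_Dec(&queueSize);     /* Over the bound: undo the reservation. */
      return -1;
   }

   if (ScheduleWork() < 0) {
      Atomic_Dec(&queueSize);     /* Scheduling failed: undo as well. */
      return -1;
   }

   return 0;                      /* The worker decrements when it runs. */
}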
void
MXUser_ReleaseRWLock(MXUserRWLock *lock)  // IN/OUT:
{
   HolderContext *myContext;

   ASSERT(lock);
   MXUserValidateHeader(&lock->header, MXUSER_TYPE_RW);

   myContext = MXUserGetHolderContext(lock);

   if (vmx86_stats) {
      MXUserStats *stats = Atomic_ReadPtr(&lock->statsMem);

      if (LIKELY(stats != NULL)) {
         MXUserHisto *histo;
         VmTimeType duration = Hostinfo_SystemTimerNS() - myContext->holdStart;

         /*
          * The statistics are not always atomically safe so protect them
          * when necessary.
          */
         if ((myContext->state == RW_LOCKED_FOR_READ) && lock->useNative) {
            MXRecLockAcquire(&lock->recursiveLock, NULL);  // non-stats
         }

         MXUserBasicStatsSample(&stats->heldStats, duration);

         histo = Atomic_ReadPtr(&stats->heldHisto);

         if (UNLIKELY(histo != NULL)) {
            MXUserHistoSample(histo, duration, GetReturnAddress());
         }

         if ((myContext->state == RW_LOCKED_FOR_READ) && lock->useNative) {
            MXRecLockRelease(&lock->recursiveLock);
         }
      }
   }

   if (UNLIKELY(myContext->state == RW_UNLOCKED)) {
      uint32 lockCount = Atomic_Read(&lock->holderCount);

      MXUserDumpAndPanic(&lock->header,
                         "%s: Non-owner release of an %s read-write lock\n",
                         __FUNCTION__,
                         lockCount == 0 ? "unacquired" : "acquired");
   }

   MXUserReleaseTracking(&lock->header);

   Atomic_Dec(&lock->holderCount);

   if (LIKELY(lock->useNative)) {
      int err = MXUserNativeRWRelease(&lock->nativeLock,
                                      myContext->state == RW_LOCKED_FOR_READ);

      if (UNLIKELY(err != 0)) {
         MXUserDumpAndPanic(&lock->header, "%s: Internal error (%d)\n",
                            __FUNCTION__, err);
      }
   } else {
      ASSERT(Atomic_Read(&lock->holderCount) == 0);
      MXRecLockRelease(&lock->recursiveLock);
   }

   myContext->state = RW_UNLOCKED;
}
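/*
 * Pairing sketch (illustrative only): MXUser_ReleaseRWLock() releases
 * whichever side was taken, using the per-holder state to pick the read or
 * write path. MXUser_AcquireForRead() is the assumed read-side acquire, and
 * ReadSharedData() is hypothetical.
 */

static void
ReadUnderRWLock(MXUserRWLock *lock)  // IN/OUT:
{
   MXUser_AcquireForRead(lock);
   ReadSharedData();
   MXUser_ReleaseRWLock(lock);   /* myContext->state selects read vs write. */
}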