/**
 * Common worker for the traffic test threads.
 *
 * Repeatedly crosses the RTSemXRoads "crossing" in the given direction until
 * the configured test duration (g_cSecs) has elapsed, counting crossings.
 *
 * @returns VINF_SUCCESS.
 * @param   iThread     The thread ordinal (unused).
 * @param   fNS         true to run north/south traffic, false for east/west.
 */
static int tstTrafficThreadCommon(uintptr_t iThread, bool fNS)
{
    for (uint32_t iLoop = 0; RTTimeMilliTS() - g_u64StartMilliTS < g_cSecs * 1000; iLoop++)
    {
        /* fudge: yield/sleep every now and then to vary the interleaving.
           Note! The previous '(iLoop % N) == N' comparisons could never be
           true (a modulo result is always < N), so this was dead code. */
        if ((iLoop % 223) == 0)
            RTThreadYield();
        else if ((iLoop % 16127) == 0)
            RTThreadSleep(1);

        if (fNS)
        {
            RTTEST_CHECK_RC(g_hTest, RTSemXRoadsNSEnter(g_hXRoads), VINF_SUCCESS);
            ASMAtomicIncU32(&g_cNSCrossings);
            RTTEST_CHECK_RC(g_hTest, RTSemXRoadsNSLeave(g_hXRoads), VINF_SUCCESS);
        }
        else
        {
            RTTEST_CHECK_RC(g_hTest, RTSemXRoadsEWEnter(g_hXRoads), VINF_SUCCESS);
            ASMAtomicIncU32(&g_cEWCrossings);
            RTTEST_CHECK_RC(g_hTest, RTSemXRoadsEWLeave(g_hXRoads), VINF_SUCCESS);
        }
    }
    return VINF_SUCCESS;
}
/**
 * Signals an event semaphore, OS/2 kernel edition.
 *
 * Wakes at most one waiting thread.  If nobody is waiting, the signal is
 * latched in fSignaled for the next waiter to consume.
 *
 * @returns VINF_SUCCESS on success, VERR_INVALID_HANDLE if the handle is
 *          bogus or already destroyed.
 * @param   hEventSem   The event semaphore handle.
 */
RTDECL(int) RTSemEventSignal(RTSEMEVENT hEventSem)
{
    PRTSEMEVENTINTERNAL pThis = (PRTSEMEVENTINTERNAL)hEventSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENT_MAGIC,
                    ("u32Magic=%RX32 pThis=%p\n", pThis->u32Magic, pThis),
                    VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);

    if (pThis->cWaiters > 0)
    {
        /* Move one thread from 'waiting' to 'waking' before actually waking it. */
        ASMAtomicDecU32(&pThis->cWaiters);
        ASMAtomicIncU32(&pThis->cWaking);

        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_ONE, &cThreads, VINF_SUCCESS);
        if (RT_UNLIKELY(!cThreads))
        {
            /* shouldn't ever happen on OS/2: nobody got woken, so undo the
               accounting and latch the signal instead. */
            ASMAtomicXchgU8(&pThis->fSignaled, true);
            ASMAtomicDecU32(&pThis->cWaking);
            ASMAtomicIncU32(&pThis->cWaiters);
        }
    }
    else
        /* No waiters: remember the signal. */
        ASMAtomicXchgU8(&pThis->fSignaled, true);

    KernReleaseSpinLock(&pThis->Spinlock);
    return VINF_SUCCESS;
}
/**
 * Initializes the USB library: opens the VBoxUSB monitor device and checks
 * the driver version.  Subsequent calls just bump the user count.
 *
 * @returns VBox status code (VERR_VERSION_MISMATCH on incompatible driver).
 */
USBLIB_DECL(int) USBLibInit(void)
{
    LogFlow((USBLIBR3 ":USBLibInit\n"));

    /*
     * Already open?
     * This isn't properly serialized, but we'll be fine with the current usage.
     */
    if (g_cUsers)
    {
        ASMAtomicIncU32(&g_cUsers);
        return VINF_SUCCESS;
    }

    RTFILE File;
    int rc = RTFileOpen(&File, VBOXUSB_DEVICE_NAME, RTFILE_O_READWRITE | RTFILE_O_OPEN | RTFILE_O_DENY_NONE);
    if (RT_FAILURE(rc))
    {
        LogRel((USBLIBR3 ":RTFileOpen failed to open VBoxUSB device.rc=%d\n", rc));
        return rc;
    }
    g_File = File;
    ASMAtomicIncU32(&g_cUsers);

    /*
     * Check the USBMonitor version.
     */
    VBOXUSBREQ_GET_VERSION Req;
    bzero(&Req, sizeof(Req));
    rc = usblibDoIOCtl(VBOXUSBMON_IOCTL_GET_VERSION, &Req, sizeof(Req));
    if (RT_SUCCESS(rc))
    {
        /* Major must match exactly; minor may be newer than expected. */
        if (   Req.u32Major != VBOXUSBMON_VERSION_MAJOR
            || Req.u32Minor < VBOXUSBMON_VERSION_MINOR)
        {
            rc = VERR_VERSION_MISMATCH;
            LogRel((USBLIBR3 ":USBMonitor version mismatch! driver v%d.%d, expecting ~v%d.%d\n",
                    Req.u32Major, Req.u32Minor, VBOXUSBMON_VERSION_MAJOR, VBOXUSBMON_VERSION_MINOR));

            /* Undo the open on failure. */
            RTFileClose(File);
            g_File = NIL_RTFILE;
            ASMAtomicDecU32(&g_cUsers);
            return rc;
        }
    }
    else
    {
        LogRel((USBLIBR3 ":USBMonitor driver version query failed. rc=%Rrc\n", rc));

        /* Undo the open on failure. */
        RTFileClose(File);
        g_File = NIL_RTFILE;
        ASMAtomicDecU32(&g_cUsers);
        return rc;
    }

    return VINF_SUCCESS;
}
/**
 * Common open worker: creates a new support driver session.
 *
 * @returns 0 on success, errno on failure.
 *          EINVAL if the open flags aren't exactly read+write.
 * @param   pDev            The device node (session is attached as cdev
 *                          private data rather than stored here).
 * @param   fOpen           The open flags; must be FREAD|FWRITE (O_RDWR).
 * @param   iDevtype        Device type (unused).
 * @param   pTd             The calling thread (unused).
 * @param   fUnrestricted   Whether the session gets unrestricted access.
 */
static int vboxdrvFreeBSDOpenCommon(struct cdev *pDev, int fOpen, int iDevtype, struct thread *pTd, bool fUnrestricted)
{
    PSUPDRVSESSION pSession;
    int rc;

    /*
     * Let's be a bit picky about the flags...
     */
    if (fOpen != (FREAD | FWRITE /*=O_RDWR*/))
    {
        Log(("VBoxDrvFreeBSDOpen: fOpen=%#x expected %#x\n", fOpen, O_RDWR));
        return EINVAL;
    }

    /*
     * Create a new session.
     */
    rc = supdrvCreateSession(&g_VBoxDrvFreeBSDDevExt, true /* fUser */, fUnrestricted, &pSession);
    if (RT_SUCCESS(rc))
    {
        /** @todo get (r)uid and (r)gid.
        pSession->Uid = stuff;
        pSession->Gid = stuff; */
        /* Attach the session to the descriptor; VBoxDrvFreeBSDDtr cleans up on close. */
        devfs_set_cdevpriv(pSession, VBoxDrvFreeBSDDtr);
        Log(("VBoxDrvFreeBSDOpen: pSession=%p\n", pSession));
        ASMAtomicIncU32(&g_cUsers);
        return 0;
    }

    return RTErrConvertToErrno(rc);
}
/**
 * Terminates the ring-0 MP notification subsystem.
 *
 * Detaches the global spinlock handle (so late users see NIL), unhooks and
 * frees the registered callback list, then destroys the spinlock.
 */
DECLHIDDEN(void) rtR0MpNotificationTerm(void)
{
    PRTMPNOTIFYREG  pHead;
    RTSPINLOCKTMP   Tmp = RTSPINLOCKTMP_INITIALIZER;
    RTSPINLOCK      hSpinlock = g_hRTMpNotifySpinLock;
    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);

    rtR0MpNotificationNativeTerm();

    /* pick up the list and the spinlock. */
    RTSpinlockAcquire(hSpinlock, &Tmp);
    ASMAtomicWriteHandle(&g_hRTMpNotifySpinLock, NIL_RTSPINLOCK);
    pHead = g_pRTMpCallbackHead;
    g_pRTMpCallbackHead = NULL;
    ASMAtomicIncU32(&g_iRTMpGeneration); /* bump so cached generations go stale */
    RTSpinlockRelease(hSpinlock, &Tmp);

    /* free the list. */
    while (pHead)
    {
        PRTMPNOTIFYREG pFree = pHead;
        pHead = pHead->pNext;

        /* paranoia: clear the links in case somebody still holds the entry. */
        pFree->pNext = NULL;
        pFree->pfnCallback = NULL;
        RTMemFree(pFree);
    }

    RTSpinlockDestroy(hSpinlock);
}
/**
 * Allocates and enqueues a new UNMAP I/O request for the given LUN.
 *
 * @returns VBox status code (VERR_NO_MEMORY if allocation fails, otherwise
 *          the status of the transfer enqueue).
 * @param   pVScsiLun   The LUN instance the request belongs to.
 * @param   pVScsiReq   The SCSI request spawning this I/O request.
 * @param   paRanges    Array of ranges to unmap.
 * @param   cRanges     Number of entries in @a paRanges.
 */
int vscsiIoReqUnmapEnqueue(PVSCSILUNINT pVScsiLun, PVSCSIREQINT pVScsiReq,
                           PRTRANGE paRanges, unsigned cRanges)
{
    LogFlowFunc(("pVScsiLun=%#p pVScsiReq=%#p paRanges=%#p cRanges=%u\n", pVScsiLun, pVScsiReq, paRanges, cRanges));

    PVSCSIIOREQINT pVScsiIoReq = (PVSCSIIOREQINT)RTMemAllocZ(sizeof(VSCSIIOREQINT));
    if (!pVScsiIoReq)
        return VERR_NO_MEMORY;

    /* Initialize the I/O request. */
    pVScsiIoReq->pVScsiReq        = pVScsiReq;
    pVScsiIoReq->pVScsiLun        = pVScsiLun;
    pVScsiIoReq->enmTxDir         = VSCSIIOREQTXDIR_UNMAP;
    pVScsiIoReq->u.Unmap.paRanges = paRanges;
    pVScsiIoReq->u.Unmap.cRanges  = cRanges;

    /* Account for it as outstanding before handing it off. */
    ASMAtomicIncU32(&pVScsiLun->IoReq.cReqOutstanding);

    int rc = vscsiLunReqTransferEnqueue(pVScsiLun, pVScsiIoReq);
    if (RT_FAILURE(rc))
    {
        /* Enqueue failed - roll back the accounting and free the request. */
        ASMAtomicDecU32(&pVScsiLun->IoReq.cReqOutstanding);
        RTMemFree(pVScsiIoReq);
    }

    return rc;
}
/**
 * Terminates the ring-0 power notification subsystem.
 *
 * Detaches the global spinlock handle (so late users see NIL), unhooks and
 * frees the registered callback list, then destroys the spinlock.
 */
DECLHIDDEN(void) rtR0PowerNotificationTerm(void)
{
    PRTPOWERNOTIFYREG   pHead;
    RTSPINLOCK          hSpinlock = g_hRTPowerNotifySpinLock;
    AssertReturnVoid(hSpinlock != NIL_RTSPINLOCK);

    /** @todo OS specific term here */

    /* pick up the list and the spinlock. */
    RTSpinlockAcquire(hSpinlock);
    ASMAtomicWriteHandle(&g_hRTPowerNotifySpinLock, NIL_RTSPINLOCK);
    pHead = g_pRTPowerCallbackHead;
    g_pRTPowerCallbackHead = NULL;
    ASMAtomicIncU32(&g_iRTPowerGeneration); /* bump so cached generations go stale */
    RTSpinlockRelease(hSpinlock);

    /* free the list. */
    while (pHead)
    {
        PRTPOWERNOTIFYREG pFree = pHead;
        pHead = pHead->pNext;

        /* paranoia: clear the links in case somebody still holds the entry. */
        pFree->pNext = NULL;
        pFree->pfnCallback = NULL;
        RTMemFree(pFree);
    }

    RTSpinlockDestroy(hSpinlock);
}
/**
 * Creates a new network adapter instance in the first free slot.
 *
 * @returns VBox status code; VERR_OUT_OF_RESOURCES when all slots are busy,
 *          otherwise the status of the OS specific creation.
 * @param   pIfFactory  Pointer to the trunk factory embedded in the globals.
 * @param   ppNew       Where to store the new adapter on success.
 */
int vboxNetAdpCreate(PINTNETTRUNKFACTORY pIfFactory, PVBOXNETADP *ppNew)
{
    int rc;
    unsigned i;
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pIfFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, TrunkFactory));

    for (i = 0; i < RT_ELEMENTS(pGlobals->aAdapters); i++)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        PVBOXNETADP pThis = &pGlobals->aAdapters[i];

        if (vboxNetAdpCheckAndSetState(pThis, kVBoxNetAdpState_Invalid, kVBoxNetAdpState_Transitional))
        {
            /* Found an empty slot -- use it. */
            uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
            Assert(cRefs == 1);
            RTMAC Mac;
            vboxNetAdpComposeMACAddress(pThis, &Mac);
            rc = vboxNetAdpOsCreate(pThis, &Mac);
            if (RT_SUCCESS(rc))
            {
                *ppNew = pThis;
                RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
                vboxNetAdpSetState(pThis, kVBoxNetAdpState_Available);
                RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
            }
            else
            {
                /* OS creation failed: release our reference and put the slot
                   back into the Invalid state so it can be reused.  The old
                   code left the slot marked Available with a dangling ref. */
                ASMAtomicDecU32(&pThis->cRefs);
                RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
                vboxNetAdpSetState(pThis, kVBoxNetAdpState_Invalid);
                RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
            }
            return rc;
        }
    }

    /* All slots in adapter array are busy. */
    return VERR_OUT_OF_RESOURCES;
}
/**
 * I/O thread for one async-completion stress-test file.
 *
 * Keeps up to cTasksActiveMax tasks in flight, randomly choosing between
 * reads and writes, until pTestFile->fRunning is cleared.  The write
 * probability shrinks as the file grows, with a floor of 33 percent.
 *
 * @returns VBox status code (status of the last submitted task).
 * @param   pVM     The cross context VM structure.
 * @param   pThread The PDM thread; pvUser points to the PDMACTESTFILE state.
 */
static int tstPDMACTestFileThread(PVM pVM, PPDMTHREAD pThread)
{
    PPDMACTESTFILE pTestFile = (PPDMACTESTFILE)pThread->pvUser;
    int iWriteChance = 100; /* Chance to get a write task in percent. */
    uint32_t cTasksStarted = 0;
    int rc = VINF_SUCCESS;

    /* First invocation is just the PDM thread handshake. */
    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
        return VINF_SUCCESS;

    while (pTestFile->fRunning)
    {
        unsigned iTaskCurr = 0;

        /* Fill all tasks */
        while (   (pTestFile->cTasksActiveCurr < pTestFile->cTasksActiveMax)
               && (iTaskCurr < pTestFile->cTasksActiveMax))
        {
            PPDMACTESTFILETASK pTask = &pTestFile->paTasks[iTaskCurr];

            if (!pTask->fActive)
            {
                /* Read or write task? */
                bool fWrite = tstPDMACTestIsTrue(iWriteChance);

                ASMAtomicIncU32(&pTestFile->cTasksActiveCurr);

                if (fWrite)
                    rc = tstPDMACStressTestFileWrite(pTestFile, pTask);
                else
                    rc = tstPDMACStressTestFileRead(pTestFile, pTask);

                /* Anything other than pending means it completed synchronously. */
                if (rc != VINF_AIO_TASK_PENDING)
                    tstPDMACStressTestFileTaskCompleted(pVM, pTask, pTestFile, rc);

                cTasksStarted++;
            }

            iTaskCurr++;
        }

        /*
         * Recalc write chance. The bigger the file the lower the chance to have a write.
         * The minimum chance is 33 percent.
         */
        iWriteChance = 100 - (int)(((float)100.0 / pTestFile->cbFileMax) * (float)pTestFile->cbFileCurr);
        iWriteChance = RT_MAX(33, iWriteChance);

        /* Wait a random amount of time. (1ms - 100ms) */
        RTThreadSleep(RTRandU32Ex(1, 100));
    }

    /* Wait for the rest to complete. */
    while (pTestFile->cTasksActiveCurr)
        RTThreadSleep(250);

    RTPrintf("Thread exiting: processed %u tasks\n", cTasksStarted);
    return rc;
}
/**
 * Resumes the CPU timestamp counter ticking.
 *
 * @returns VINF_SUCCESS or VERR_TM_VIRTUAL_TICKING_IPE (asserted).
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure.
 *
 * @remarks The first VCPU to resume recomputes the raw-source offset from
 *          the last paused TSC and publishes the delta in offTSCPause for
 *          the remaining VCPUs to apply.
 */
int tmCpuTickResumeLocked(PVM pVM, PVMCPU pVCpu)
{
    if (!pVCpu->tm.s.fTSCTicking)
    {
        /* TSC must be ticking before calling tmCpuTickGetRawVirtual()! */
        pVCpu->tm.s.fTSCTicking = true;
        uint32_t c = ASMAtomicIncU32(&pVM->tm.s.cTSCsTicking);
        AssertMsgReturn(c <= pVM->cCpus, ("%u vs %u\n", c, pVM->cCpus), VERR_TM_VIRTUAL_TICKING_IPE);
        if (c == 1)
        {
            /* The first VCPU to resume. */
            uint64_t offTSCRawSrcOld = pVCpu->tm.s.offTSCRawSrc;

            STAM_COUNTER_INC(&pVM->tm.s.StatTSCResume);

            /* When resuming, use the TSC value of the last stopped VCPU to avoid the TSC going back. */
            if (pVM->tm.s.enmTSCMode == TMTSCMODE_REAL_TSC_OFFSET)
                pVCpu->tm.s.offTSCRawSrc = SUPReadTsc() - pVM->tm.s.u64LastPausedTSC;
            else
                pVCpu->tm.s.offTSCRawSrc = tmCpuTickGetRawVirtual(pVM, false /* don't check for pending timers */)
                                         - pVM->tm.s.u64LastPausedTSC;

            /* Calculate the offset for other VCPUs to use. */
            pVM->tm.s.offTSCPause = pVCpu->tm.s.offTSCRawSrc - offTSCRawSrcOld;
        }
        else
        {
            /* All other VCPUs (if any). */
            pVCpu->tm.s.offTSCRawSrc += pVM->tm.s.offTSCPause;
        }
    }
    return VINF_SUCCESS;
}
/* NOTE(review): the matching #if for the #endif below (an alternate signature
   for another FreeBSD version, presumably) is outside this view - confirm. */
static int vgdrvFreeBSDOpen(struct cdev *pDev, int fOpen, struct thread *pTd)
#endif
{
    int rc;
    PVBOXGUESTSESSION pSession;

    LogFlow(("vgdrvFreeBSDOpen:\n"));

    /*
     * Try grab it (we don't grab the giant, remember).
     * 0x42 serves as a temporary marker so a racing open fails with EBUSY
     * while the session is still being created.
     */
    if (!ASMAtomicCmpXchgPtr(&pDev->si_drv1, (void *)0x42, NULL))
        return EBUSY;

    /*
     * Create a new session.
     */
    rc = VGDrvCommonCreateUserSession(&g_DevExt, &pSession);
    if (RT_SUCCESS(rc))
    {
        /* Swap the marker for the real session pointer. */
        if (ASMAtomicCmpXchgPtr(&pDev->si_drv1, pSession, (void *)0x42))
        {
            Log(("vgdrvFreeBSDOpen: success - g_DevExt=%p pSession=%p rc=%d pid=%d\n", &g_DevExt, pSession, rc, (int)RTProcSelf()));
            ASMAtomicIncU32(&cUsers);
            return 0;
        }

        /* Somebody replaced the marker behind our back - back out. */
        VGDrvCommonCloseSession(&g_DevExt, pSession);
    }

    LogRel(("vgdrvFreeBSDOpen: failed. rc=%d\n", rc));
    return RTErrConvertToErrno(rc);
}
/**
 * Test stub of SUPR0ObjAddRefEx: retains a reference to the fake object.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_PARAMETER if the session is wrong.
 * @param   pvObj       The object (an OBJREF in this testcase).
 * @param   pSession    The session; must be the global test session.
 * @param   fNoBlocking Ignored by this stub.
 */
INTNETR3DECL(int) SUPR0ObjAddRefEx(void *pvObj, PSUPDRVSESSION pSession, bool fNoBlocking)
{
    RTTEST_CHECK_RET(g_hTest, pSession == g_pSession, VERR_INVALID_PARAMETER);
    ASMAtomicIncU32(&((POBJREF)pvObj)->cRefs);
    return VINF_SUCCESS;
}
/**
 * Destroys a multiple-release event semaphore, OS/2 kernel edition.
 *
 * The handle is invalidated under the spinlock; threads still waiting are
 * woken with VERR_SEM_DESTROYED, and the last thread to leave performs the
 * actual cleanup.
 *
 * @returns VINF_SUCCESS, or VERR_INVALID_HANDLE for a bad handle.
 * @param   hEventMultiSem  The semaphore handle; NIL is quietly ignored.
 */
RTDECL(int) RTSemEventMultiDestroy(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (pThis == NIL_RTSEMEVENTMULTI)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);

    KernAcquireSpinLock(&pThis->Spinlock);
    ASMAtomicIncU32(&pThis->u32Magic); /* make the handle invalid */
    if (pThis->cWaiters > 0)
    {
        /* abort waiting thread, last man cleans up. */
        ASMAtomicXchgU32(&pThis->cWaking, pThis->cWaking + pThis->cWaiters);
        ULONG cThreads;
        KernWakeup((ULONG)pThis, WAKEUP_DATA | WAKEUP_BOOST, &cThreads, (ULONG)VERR_SEM_DESTROYED);
        KernReleaseSpinLock(&pThis->Spinlock);
    }
    else if (pThis->cWaking)
        /* the last waking thread is gonna do the cleanup */
        KernReleaseSpinLock(&pThis->Spinlock);
    else
    {
        /* Nobody waiting or waking: free everything right here. */
        KernReleaseSpinLock(&pThis->Spinlock);
        KernFreeSpinLock(&pThis->Spinlock);
        RTMemFree(pThis);
    }

    return VINF_SUCCESS;
}
/** @copydoc PDMISCSICONNECTOR::pfnSCSIRequestSend. */
static DECLCALLBACK(int) drvscsiRequestSend(PPDMISCSICONNECTOR pInterface, PPDMSCSIREQUEST pSCSIRequest)
{
    PDRVSCSI pThis = PDMISCSICONNECTOR_2_DRVSCSI(pInterface);

#ifdef DEBUG
    drvscsiDumpScsiRequest(pSCSIRequest);
#endif

    /* Translate the PDM SCSI request into a VSCSI request handle. */
    VSCSIREQ hVScsiReq;
    int rc = VSCSIDeviceReqCreate(pThis->hVScsiDevice, &hVScsiReq,
                                  pSCSIRequest->uLogicalUnit,
                                  pSCSIRequest->pbCDB,
                                  pSCSIRequest->cbCDB,
                                  pSCSIRequest->cbScatterGather,
                                  pSCSIRequest->cScatterGatherEntries,
                                  pSCSIRequest->paScatterGatherHead,
                                  pSCSIRequest->pbSenseBuffer,
                                  pSCSIRequest->cbSenseBuffer,
                                  pSCSIRequest);
    if (RT_SUCCESS(rc))
    {
        /* Track queue depth and hand the request to the VSCSI layer. */
        ASMAtomicIncU32(&pThis->StatIoDepth);
        rc = VSCSIDeviceReqEnqueue(pThis->hVScsiDevice, hVScsiReq);
    }
    return rc;
}
/**
 * Allocates and enqueues a new read/write/flush I/O request for the LUN.
 *
 * @returns VBox status code (VERR_NO_MEMORY if allocation fails, otherwise
 *          the status of the transfer enqueue).
 * @param   pVScsiLun   The LUN instance the request belongs to.
 * @param   pVScsiReq   The SCSI request spawning this I/O request.
 * @param   enmTxDir    The transfer direction.
 * @param   uOffset     Byte offset into the medium.
 * @param   cbTransfer  Number of bytes to transfer.
 */
int vscsiIoReqTransferEnqueue(PVSCSILUNINT pVScsiLun, PVSCSIREQINT pVScsiReq,
                              VSCSIIOREQTXDIR enmTxDir, uint64_t uOffset, size_t cbTransfer)
{
    int rc = VINF_SUCCESS;
    PVSCSIIOREQINT pVScsiIoReq = NULL;

    /* Note: cbTransfer is size_t, so use %zu instead of the old (wrong on
       LP64 hosts) %u specifier. */
    LogFlowFunc(("pVScsiLun=%#p pVScsiReq=%#p enmTxDir=%u uOffset=%llu cbTransfer=%zu\n",
                 pVScsiLun, pVScsiReq, enmTxDir, uOffset, cbTransfer));

    pVScsiIoReq = (PVSCSIIOREQINT)RTMemAllocZ(sizeof(VSCSIIOREQINT));
    if (!pVScsiIoReq)
        return VERR_NO_MEMORY;

    /* Initialize the I/O request, borrowing the S/G list from the SCSI request. */
    pVScsiIoReq->pVScsiReq       = pVScsiReq;
    pVScsiIoReq->pVScsiLun       = pVScsiLun;
    pVScsiIoReq->enmTxDir        = enmTxDir;
    pVScsiIoReq->u.Io.uOffset    = uOffset;
    pVScsiIoReq->u.Io.cbTransfer = cbTransfer;
    pVScsiIoReq->u.Io.paSeg      = pVScsiReq->SgBuf.paSegs;
    pVScsiIoReq->u.Io.cSeg       = pVScsiReq->SgBuf.cSegs;

    /* Account for it as outstanding before handing it off. */
    ASMAtomicIncU32(&pVScsiLun->IoReq.cReqOutstanding);

    rc = vscsiLunReqTransferEnqueue(pVScsiLun, pVScsiIoReq);
    if (RT_FAILURE(rc))
    {
        /* Enqueue failed - roll back the accounting and free the request. */
        ASMAtomicDecU32(&pVScsiLun->IoReq.cReqOutstanding);
        RTMemFree(pVScsiIoReq);
    }

    return rc;
}
/**
 * Registers the VMM wide format types.
 *
 * Called by VMMR3Init, VMMR0Init and VMMRCInit.  Only the first caller
 * (across contexts) performs the actual registration.
 *
 * @returns VBox status code from RTStrFormatTypeRegister, or VINF_SUCCESS
 *          when the types were already registered.
 */
int vmmInitFormatTypes(void)
{
    if (ASMAtomicIncU32(&g_cFormatTypeUsers) != 1)
        return VINF_SUCCESS;
    return RTStrFormatTypeRegister("vmcpuset", vmmFormatTypeVmCpuSet, NULL);
}
/**
 * Recycles a request.
 *
 * @returns true if recycled, false if it should be freed.
 * @param   pQueue  The queue.
 * @param   pReq    The request.
 */
DECLHIDDEN(bool) rtReqQueueRecycle(PRTREQQUEUEINT pQueue, PRTREQINT pReq)
{
    if (   !pQueue
        || pQueue->cReqFree >= 128)
        return false;

    /* NOTE(review): the cap check above and this increment are not one
       atomic step, so the 128 limit is approximate - presumably fine for
       a best-effort free-list cache; confirm. */
    ASMAtomicIncU32(&pQueue->cReqFree);

    /* Pick a sub-list via the round-robin index, then push the request onto
       it with a CAS retry loop (lock-free LIFO push). */
    PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)];
    PRTREQ pNext;
    do
    {
        pNext = *ppHead;
        ASMAtomicWritePtr(&pReq->pNext, pNext);
    } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));

    return true;
}
/**
 * Retains a reference to the request.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle
 *          (asserted).
 * @param   hReq    The request handle.
 */
RTDECL(uint32_t) RTReqRetain(PRTREQ hReq)
{
    PRTREQINT pThis = hReq;
    AssertPtrReturn(pThis, UINT32_MAX);
    AssertReturn(pThis->u32Magic == RTREQ_MAGIC, UINT32_MAX);

    uint32_t const cRefs = ASMAtomicIncU32(&pThis->cRefs);
    return cRefs;
}
/**
 * Links an overlay onto its source's overlay list and bumps the counter.
 *
 * @param   pDevExt     The device extension.
 * @param   pOverlay    The overlay to add; its VidPnSourceId selects the source.
 */
static void vboxVhwaHlpOverlayListAdd(PVBOXMP_DEVEXT pDevExt, PVBOXWDDM_OVERLAY pOverlay)
{
    PVBOXWDDM_SOURCE pSource = &pDevExt->aSources[pOverlay->VidPnSourceId];
    KIRQL Irql;

    /* The list and counter are updated under the per-source spinlock. */
    KeAcquireSpinLock(&pSource->OverlayListLock, &Irql);
    ASMAtomicIncU32(&pSource->cOverlays);
    InsertHeadList(&pSource->OverlayList, &pOverlay->ListEntry);
    KeReleaseSpinLock(&pSource->OverlayListLock, Irql);
}
/**
 * Retains a reference to the certificate context.
 *
 * @returns The new reference count, UINT32_MAX on invalid input (asserted).
 * @param   pCertCtx    The certificate context.
 */
RTDECL(uint32_t) RTCrCertCtxRetain(PCRTCRCERTCTX pCertCtx)
{
    AssertPtrReturn(pCertCtx, UINT32_MAX);
    PRTCRCERTCTXINT pThis = RT_FROM_MEMBER(pCertCtx, RTCRCERTCTXINT, Public);
    AssertReturn(pThis->u32Magic == RTCRCERTCTXINT_MAGIC, UINT32_MAX);

    uint32_t const cRefs = ASMAtomicIncU32(&pThis->cRefs);
    Assert(cRefs < 64);
    return cRefs;
}
/**
 * Retains a reference to the thread-context hook object.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle.
 * @param   hThreadCtx  The thread-context hook handle.
 */
RTDECL(uint32_t) RTThreadCtxHooksRetain(RTTHREADCTX hThreadCtx)
{
    PRTTHREADCTXINT pThis = hThreadCtx;
    RTTHREADCTX_VALID_RETURN_RC(hThreadCtx, UINT32_MAX);

    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
    Assert(cNewRefs < UINT32_MAX / 2);
    return cNewRefs;
}
/**
 * Retains the swapchain if it is still in the initialized state.
 * Caller is expected to hold the appropriate lock.
 *
 * @returns TRUE if a reference was added, FALSE if the swapchain is no
 *          longer initialized.
 * @param   pSwapchain  The swapchain.
 */
DECLINLINE(BOOLEAN) vboxWddmSwapchainRetainLocked(PVBOXWDDM_SWAPCHAIN pSwapchain)
{
    if (pSwapchain->enmState != VBOXWDDM_OBJSTATE_TYPE_INITIALIZED)
        return FALSE;
    ASMAtomicIncU32(&pSwapchain->cRefs);
    return TRUE;
}
/**
 * Retains a reference to the kernel module information record.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle.
 * @param   hKrnlModInfo    The kernel module information handle.
 */
RTDECL(uint32_t) RTKrnlModInfoRetain(RTKRNLMODINFO hKrnlModInfo)
{
    PRTKRNLMODINFOINT pThis = hKrnlModInfo;
    AssertPtrReturn(pThis, UINT32_MAX);

    uint32_t const cRefs = ASMAtomicIncU32(&pThis->cRefs);
    AssertMsg(cRefs > 1 && cRefs < _1M, ("%#x %p\n", cRefs, pThis));
    return cRefs;
}
/**
 * Creates a mutex semaphore (POSIX implementation).
 *
 * @returns IPRT status code.
 * @param   phMutexSem  Where to store the handle on success.
 * @param   fFlags      RTSEMMUTEX_FLAGS_XXX; only NO_LOCK_VAL is valid.
 * @param   hClass      Lock validator class (strict builds only).
 * @param   uSubClass   Lock validator sub-class.
 * @param   pszNameFmt  Optional name format string for the validator record;
 *                      NULL for an auto-generated anonymous name.
 */
RTDECL(int) RTSemMutexCreateEx(PRTSEMMUTEX phMutexSem, uint32_t fFlags, RTLOCKVALCLASS hClass, uint32_t uSubClass, const char *pszNameFmt, ...)
{
    AssertReturn(!(fFlags & ~RTSEMMUTEX_FLAGS_NO_LOCK_VAL), VERR_INVALID_PARAMETER);

    /*
     * Allocate semaphore handle.
     */
    int rc;
    struct RTSEMMUTEXINTERNAL *pThis = (struct RTSEMMUTEXINTERNAL *)RTMemAlloc(sizeof(struct RTSEMMUTEXINTERNAL));
    if (pThis)
    {
        /*
         * Create the semaphore.
         */
        pthread_mutexattr_t MutexAttr;
        rc = pthread_mutexattr_init(&MutexAttr);
        if (!rc)
        {
            rc = pthread_mutex_init(&pThis->Mutex, &MutexAttr);
            if (!rc)
            {
                pthread_mutexattr_destroy(&MutexAttr);

                pThis->Owner    = (pthread_t)-1;
                pThis->cNesting = 0;
                pThis->u32Magic = RTSEMMUTEX_MAGIC;
#ifdef RTSEMMUTEX_STRICT
                if (!pszNameFmt)
                {
                    static uint32_t volatile s_iMutexAnon = 0;
                    RTLockValidatorRecExclInit(&pThis->ValidatorRec, hClass, uSubClass, pThis,
                                               !(fFlags & RTSEMMUTEX_FLAGS_NO_LOCK_VAL),
                                               "RTSemMutex-%u", ASMAtomicIncU32(&s_iMutexAnon) - 1);
                }
                else
                {
                    va_list va;
                    va_start(va, pszNameFmt);
                    RTLockValidatorRecExclInitV(&pThis->ValidatorRec, hClass, uSubClass, pThis,
                                                !(fFlags & RTSEMMUTEX_FLAGS_NO_LOCK_VAL), pszNameFmt, va);
                    va_end(va);
                }
#endif
                *phMutexSem = pThis;
                return VINF_SUCCESS;
            }
            pthread_mutexattr_destroy(&MutexAttr);
        }

        /* pthread_* return positive error numbers; translate them so the
           caller doesn't mistake a failure for an IPRT success status
           (IPRT treats rc >= 0 as success). */
        rc = RTErrConvertFromErrno(rc);
        RTMemFree(pThis);
    }
    else
        rc = VERR_NO_MEMORY;
    return rc;
}
/**
 * Wrapper between the native darwin per-cpu callback and PFNRTWORKER
 * for the RTMpOnSpecific API.
 *
 * Runs the worker and counts a hit only on the CPU the caller asked for.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 */
static void rtmpOnSpecificDarwinWrapper(void *pvArg)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    RTCPUID   idCpu = cpu_number();
    if (pArgs->idCpu != idCpu)
        return; /* not the CPU we were asked to run on */
    pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
    ASMAtomicIncU32(&pArgs->cHits);
}
/**
 * Retains a reference to the digest.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle.
 * @param   hDigest     The digest handle.
 */
RTDECL(uint32_t) RTCrDigestRetain(RTCRDIGEST hDigest)
{
    PRTCRDIGESTINT pThis = hDigest;
    AssertPtrReturn(pThis, UINT32_MAX);
    AssertReturn(pThis->u32Magic == RTCRDIGESTINT_MAGIC, UINT32_MAX);

    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
    Assert(cNewRefs < 64);
    return cNewRefs;
}
/**
 * Retains a reference to the certificate store.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle.
 * @param   hStore      The store handle.
 */
RTDECL(uint32_t) RTCrStoreRetain(RTCRSTORE hStore)
{
    PRTCRSTOREINT pThis = (PRTCRSTOREINT)hStore;
    AssertPtrReturn(pThis, UINT32_MAX);
    AssertReturn(pThis->u32Magic == RTCRSTOREINT_MAGIC, UINT32_MAX);

    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
    Assert(cNewRefs < 8192);
    return cNewRefs;
}
/**
 * Retains a reference to the kernel debug info object.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle.
 * @param   hKrnlInfo   The kernel info handle.
 */
RTR0DECL(uint32_t) RTR0DbgKrnlInfoRetain(RTDBGKRNLINFO hKrnlInfo)
{
    PRTDBGKRNLINFOINT pThis = hKrnlInfo;
    AssertPtrReturn(pThis, UINT32_MAX);
    AssertMsgReturn(pThis->u32Magic == RTDBGKRNLINFO_MAGIC,
                    ("%p: u32Magic=%RX32\n", pThis, pThis->u32Magic), UINT32_MAX);

    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
    Assert(cNewRefs && cNewRefs < 100000);
    return cNewRefs;
}
/**
 * Retains a reference to the PKIX signature object.
 *
 * @returns The new reference count, UINT32_MAX on an invalid handle.
 * @param   hSignature  The signature handle.
 */
RTDECL(uint32_t) RTCrPkixSignatureRetain(RTCRPKIXSIGNATURE hSignature)
{
    PRTCRPKIXSIGNATUREINT pThis = hSignature;
    AssertPtrReturn(pThis, UINT32_MAX);
    AssertReturn(pThis->u32Magic == RTCRPKIXSIGNATUREINT_MAGIC, UINT32_MAX);

    uint32_t const cNewRefs = ASMAtomicIncU32(&pThis->cRefs);
    Assert(cNewRefs < 64);
    return cNewRefs;
}
/**
 * Wrapper between the native Haiku per-cpu callback and PFNRTWORKER
 * for the RTMpOnSpecific API.
 *
 * Runs the worker and counts a hit only on the CPU the caller asked for.
 *
 * @param   pvArg   Pointer to the RTMPARGS package.
 * @param   current The CPU this callback is running on.
 */
static void rtmpOnSpecificHaikuWrapper(void *pvArg, int current)
{
    PRTMPARGS pArgs = (PRTMPARGS)pvArg;
    RTCPUID   idCpu = current;
    if (pArgs->idCpu != idCpu)
        return; /* not the CPU we were asked to run on */
    pArgs->pfnWorker(idCpu, pArgs->pvUser1, pArgs->pvUser2);
    ASMAtomicIncU32(&pArgs->cHits);
}