RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...) { AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER); Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL)); struct RTSEMEVENTINTERNAL *pThis; if (!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)) pThis = (struct RTSEMEVENTINTERNAL *)RTMemAlloc(sizeof(*pThis)); else pThis = (struct RTSEMEVENTINTERNAL *)rtMemBaseAlloc(sizeof(*pThis)); if (!pThis) return VERR_NO_MEMORY; /* * Create the semaphore. * (Auto reset, not signaled, private event object.) */ pThis->hev = CreateEvent(NULL, FALSE, FALSE, NULL); if (pThis->hev != NULL) /* not INVALID_HANDLE_VALUE */ { pThis->u32Magic = RTSEMEVENT_MAGIC; pThis->fFlags = fFlags; #ifdef RTSEMEVENT_STRICT if (!pszNameFmt) { static uint32_t volatile s_iSemEventAnon = 0; RTLockValidatorRecSharedInit(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis, true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL), "RTSemEvent-%u", ASMAtomicIncU32(&s_iSemEventAnon) - 1); } else { va_list va; va_start(va, pszNameFmt); RTLockValidatorRecSharedInitV(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis, true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL), pszNameFmt, va); va_end(va); } pThis->fEverHadSignallers = false; #else RT_NOREF_PV(hClass); RT_NOREF_PV(pszNameFmt); #endif *phEventSem = pThis; return VINF_SUCCESS; } DWORD dwErr = GetLastError(); if (!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)) RTMemFree(pThis); else rtMemBaseFree(pThis); return RTErrConvertFromWin32(dwErr); }
RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...) { AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER); Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL)); /* * Allocate semaphore handle. */ struct RTSEMEVENTINTERNAL *pThis; if (!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)) pThis = (struct RTSEMEVENTINTERNAL *)RTMemAlloc(sizeof(struct RTSEMEVENTINTERNAL)); else pThis = (struct RTSEMEVENTINTERNAL *)rtMemBaseAlloc(sizeof(struct RTSEMEVENTINTERNAL)); if (pThis) { pThis->iMagic = RTSEMEVENT_MAGIC; pThis->cWaiters = 0; pThis->fSignalled = 0; pThis->fFlags = fFlags; #ifdef RTSEMEVENT_STRICT if (!pszNameFmt) { static uint32_t volatile s_iSemEventAnon = 0; RTLockValidatorRecSharedInit(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis, true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL), "RTSemEvent-%u", ASMAtomicIncU32(&s_iSemEventAnon) - 1); } else { va_list va; va_start(va, pszNameFmt); RTLockValidatorRecSharedInitV(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis, true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL), pszNameFmt, va); va_end(va); } pThis->fEverHadSignallers = false; #endif *phEventSem = pThis; return VINF_SUCCESS; } return VERR_NO_MEMORY; }
/**
 * Worker for RTHeapPageAlloc.
 *
 * @returns IPRT status code
 * @param   pHeap       The heap - locked.
 * @param   cPages      The page count.
 * @param   pszTag      The tag.
 * @param   fZero       Whether to zero the memory.
 * @param   ppv         Where to return the address of the allocation
 *                      on success.
 */
static int rtHeapPageAllocLocked(PRTHEAPPAGE pHeap, size_t cPages, const char *pszTag, bool fZero, void **ppv)
{
    int rc;
    NOREF(pszTag); /* tag is currently unused by this backend */

    /*
     * Use the hints first.
     */
    if (pHeap->pHint1)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint1, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }
    if (pHeap->pHint2)
    {
        rc = rtHeapPageAllocFromBlock(pHeap->pHint2, cPages, fZero, ppv);
        if (rc != VERR_NO_MEMORY)
            return rc;
    }

    /*
     * Search the heap for a block with enough free space.
     *
     * N.B. This search algorithm is not optimal at all. What (hopefully) saves
     *      it are the two hints above.
     */
    if (pHeap->cFreePages >= cPages)
    {
        RTHEAPPAGEALLOCARGS Args;
        Args.cPages  = cPages;
        Args.pvAlloc = NULL;
        Args.fZero   = fZero;
        RTAvlrPVDoWithAll(&pHeap->BlockTree, true /*fFromLeft*/, rtHeapPageAllocCallback, &Args);
        if (Args.pvAlloc)
        {
            *ppv = Args.pvAlloc;
            return VINF_SUCCESS;
        }
    }

    /*
     * Didn't find anything, so expand the heap with a new block.
     * The heap lock is dropped around the mmap call since it can be slow.
     */
    RTCritSectLeave(&pHeap->CritSect);
    void *pvPages;
    pvPages = mmap(NULL, RTMEMPAGEPOSIX_BLOCK_SIZE,
                   PROT_READ | PROT_WRITE | (pHeap->fExec ? PROT_EXEC : 0),
                   MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);
    if (pvPages == MAP_FAILED)
    {
        /* Fix: save errno before re-entering the critical section, as
           RTCritSectEnter may make system calls that clobber it. */
        int const iErrNo = errno;
        RTCritSectEnter(&pHeap->CritSect);
        return RTErrConvertFromErrno(iErrNo);
    }

    /** @todo Eliminate this rtMemBaseAlloc dependency! */
    PRTHEAPPAGEBLOCK pBlock = (PRTHEAPPAGEBLOCK)rtMemBaseAlloc(sizeof(*pBlock));
    if (!pBlock)
    {
        munmap(pvPages, RTMEMPAGEPOSIX_BLOCK_SIZE);
        RTCritSectEnter(&pHeap->CritSect);
        return VERR_NO_MEMORY;
    }

    RT_ZERO(*pBlock);
    pBlock->Core.Key     = pvPages;
    pBlock->Core.KeyLast = (uint8_t *)pvPages + RTMEMPAGEPOSIX_BLOCK_SIZE - 1;
    pBlock->cFreePages   = RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pBlock->pHeap        = pHeap;

    RTCritSectEnter(&pHeap->CritSect);

    bool fRc = RTAvlrPVInsert(&pHeap->BlockTree, &pBlock->Core);
    Assert(fRc); NOREF(fRc);
    pHeap->cFreePages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;
    pHeap->cHeapPages += RTMEMPAGEPOSIX_BLOCK_PAGE_COUNT;

    /*
     * Grab memory from the new block (cannot fail).
     */
    rc = rtHeapPageAllocFromBlock(pBlock, cPages, fZero, ppv);
    Assert(rc == VINF_SUCCESS);

    return rc;
}
RTDECL(int) RTSemEventCreateEx(PRTSEMEVENT phEventSem, uint32_t fFlags, RTLOCKVALCLASS hClass, const char *pszNameFmt, ...) { AssertReturn(!(fFlags & ~(RTSEMEVENT_FLAGS_NO_LOCK_VAL | RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)), VERR_INVALID_PARAMETER); Assert(!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK) || (fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL)); /* * Allocate semaphore handle. */ int rc; struct RTSEMEVENTINTERNAL *pThis; if (!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)) pThis = (struct RTSEMEVENTINTERNAL *)RTMemAlloc(sizeof(*pThis)); else pThis = (struct RTSEMEVENTINTERNAL *)rtMemBaseAlloc(sizeof(*pThis)); if (pThis) { /* * Create the condition variable. */ rc = pthread_cond_init(&pThis->Cond, NULL); if (!rc) { /* * Create the semaphore. */ rc = pthread_mutex_init(&pThis->Mutex, NULL); if (!rc) { ASMAtomicWriteU32(&pThis->u32State, EVENT_STATE_NOT_SIGNALED); ASMAtomicWriteU32(&pThis->cWaiters, 0); pThis->fFlags = fFlags; #ifdef RTSEMEVENT_STRICT if (!pszNameFmt) { static uint32_t volatile s_iSemEventAnon = 0; RTLockValidatorRecSharedInit(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis, true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL), "RTSemEvent-%u", ASMAtomicIncU32(&s_iSemEventAnon) - 1); } else { va_list va; va_start(va, pszNameFmt); RTLockValidatorRecSharedInitV(&pThis->Signallers, hClass, RTLOCKVAL_SUB_CLASS_ANY, pThis, true /*fSignaller*/, !(fFlags & RTSEMEVENT_FLAGS_NO_LOCK_VAL), pszNameFmt, va); va_end(va); } pThis->fEverHadSignallers = false; #else RT_NOREF_PV(hClass); RT_NOREF_PV(pszNameFmt); #endif *phEventSem = pThis; return VINF_SUCCESS; } pthread_cond_destroy(&pThis->Cond); } rc = RTErrConvertFromErrno(rc); if (!(fFlags & RTSEMEVENT_FLAGS_BOOTSTRAP_HACK)) RTMemFree(pThis); else rtMemBaseFree(pThis); } else rc = VERR_NO_MEMORY; return rc; }