/**
 * Grows the cache.
 *
 * @returns IPRT status code.
 * @param   pThis       The memory cache instance.
 */
static int rtMemCacheGrow(RTMEMCACHEINT *pThis)
{
    /*
     * Enter the critical section here to avoid allocation races leading to
     * wasted memory (++) and make it easier to link in the new page.
     */
    RTCritSectEnter(&pThis->CritSect);
    int rc = VINF_SUCCESS;
    if (pThis->cFree < 0)
    {
        /*
         * Allocate and initialize the new page.
         *
         * We put the constructor bitmap at the lower end right after cFree.
         * We then push the object array to the end of the page and place the
         * allocation bitmap below it.  The hope is to increase the chance that
         * the allocation bitmap is in a different cache line than cFree, since
         * this increases performance markedly when lots of threads are beating
         * on the cache.
         */
        PRTMEMCACHEPAGE pPage = (PRTMEMCACHEPAGE)RTMemPageAlloc(PAGE_SIZE);
        if (pPage)
        {
            uint32_t const cObjects = RT_MIN(pThis->cPerPage, pThis->cMax - pThis->cTotal);

            ASMMemZeroPage(pPage);
            pPage->pCache   = pThis;
            pPage->pNext    = NULL;
            pPage->cFree    = cObjects;
            pPage->cObjects = cObjects;
            uint8_t *pb = (uint8_t *)(pPage + 1);
            pb = RT_ALIGN_PT(pb, 8, uint8_t *);
            pPage->pbmCtor  = pb;
            pb = (uint8_t *)pPage + PAGE_SIZE - pThis->cbObject * cObjects;
            pPage->pbObjects = pb;
            Assert(RT_ALIGN_P(pb, pThis->cbAlignment) == pb);
            pb -= pThis->cBits / 8;
            pb = (uint8_t *)((uintptr_t)pb & ~(uintptr_t)7);
            pPage->pbmAlloc = pb;
            Assert((uintptr_t)pPage->pbmCtor + pThis->cBits / 8 <= (uintptr_t)pPage->pbmAlloc);

            /* Mark the bitmap padding and any unused objects as allocated. */
            for (uint32_t iBit = cObjects; iBit < pThis->cBits; iBit++)
                ASMBitSet(pPage->pbmAlloc, iBit);

            /* Make it the hint. */
            ASMAtomicWritePtr(&pThis->pPageHint, pPage);

            /* Link the page in at the end of the list. */
            ASMAtomicWritePtr(pThis->ppPageNext, pPage);
            pThis->ppPageNext = &pPage->pNext;

            /* Add it to the page counts. */
            ASMAtomicAddS32(&pThis->cFree, cObjects);
            ASMAtomicAddU32(&pThis->cTotal, cObjects);
        }
        else
            rc = VERR_NO_MEMORY;
    }
    RTCritSectLeave(&pThis->CritSect);
    return rc;
}
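/*
 * Hedged sketch only (not the actual rtMemCacheAlloc code): it illustrates how
 * a page laid out by rtMemCacheGrow above could be consumed, scanning pbmAlloc
 * for a clear bit and claiming it atomically.  rtMemCacheAllocFromPageSketch
 * is an illustrative name; a real implementation would also maintain the free
 * counters.
 */
static void *rtMemCacheAllocFromPageSketch(RTMEMCACHEINT *pThis, PRTMEMCACHEPAGE pPage)
{
    /* Look for a free object in the allocation bitmap. */
    int32_t iObj = ASMBitFirstClear(pPage->pbmAlloc, pThis->cBits);
    while (iObj >= 0)
    {
        /* Claim the bit atomically; another thread may race us for it. */
        if (!ASMAtomicBitTestAndSet(pPage->pbmAlloc, iObj))
            return pPage->pbObjects + (size_t)iObj * pThis->cbObject;
        iObj = ASMBitNextClear(pPage->pbmAlloc, pThis->cBits, iObj);
    }
    return NULL; /* page is full */
}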
RTDECL(int) RTSemMutexDestroy(RTSEMMUTEX hMutexSem)
{
    /*
     * Validate.
     */
    RTSEMMUTEXINTERNAL *pThis = hMutexSem;
    if (pThis == NIL_RTSEMMUTEX)
        return VINF_SUCCESS;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMMUTEX_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Close semaphore handle.
     */
    AssertReturn(ASMAtomicCmpXchgU32(&pThis->u32Magic, RTSEMMUTEX_MAGIC_DEAD, RTSEMMUTEX_MAGIC),
                 VERR_INVALID_HANDLE);
    HANDLE hMtx = pThis->hMtx;
    ASMAtomicWritePtr(&pThis->hMtx, INVALID_HANDLE_VALUE);

    int rc = VINF_SUCCESS;
    if (!CloseHandle(hMtx))
    {
        rc = RTErrConvertFromWin32(GetLastError());
        AssertMsgFailed(("%p rc=%d lasterr=%d\n", pThis->hMtx, rc, GetLastError()));
    }

#ifdef RTSEMMUTEX_STRICT
    RTLockValidatorRecExclDelete(&pThis->ValidatorRec);
#endif

    RTMemFree(pThis);
    return rc;
}
/**
 * Sets the user argument for a type.
 *
 * This can be used if a user argument needs relocating in GC.
 *
 * @returns IPRT status code.
 * @retval  VINF_SUCCESS on success.
 * @retval  VERR_FILE_NOT_FOUND if not found.
 *
 * @param   pszType     The type to update.
 * @param   pvUser      The new user argument value.
 */
RTDECL(int) RTStrFormatTypeSetUser(const char *pszType, void *pvUser)
{
    int32_t i;

    /*
     * Validate input.
     */
    AssertPtr(pszType);

    /*
     * Locate the entry and update it.
     */
    rtstrFormatTypeReadLock();

    i = rtstrFormatTypeLookup(pszType, strlen(pszType));
    if (i >= 0)
        ASMAtomicWritePtr(&g_aTypes[i].pvUser, pvUser);

    rtstrFormatTypeReadUnlock();
    Assert(i >= 0);
    return i >= 0 ? VINF_SUCCESS : VERR_FILE_NOT_FOUND; /** @todo fix status code */
}
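/*
 * Hedged usage sketch (not from the original file): register a custom format
 * type whose user argument is later updated with RTStrFormatTypeSetUser.  The
 * handler name, the "mytype" type and the FNRTSTRFORMATTYPE signature are as
 * assumed here.
 */
static DECLCALLBACK(size_t) myTypeFormat(PFNRTSTROUTPUT pfnOutput, void *pvArgOutput,
                                         const char *pszType, void const *pvValue,
                                         int cchWidth, int cchPrecision, unsigned fFlags,
                                         void *pvUser)
{
    RT_NOREF(pszType, pvValue, cchWidth, cchPrecision, fFlags);
    const char *psz = (const char *)pvUser;             /* the user argument set below */
    return pfnOutput(pvArgOutput, psz, strlen(psz));
}

static int myTypeSetupSketch(void)
{
    /* Register with an initial user argument ... */
    int rc = RTStrFormatTypeRegister("mytype", myTypeFormat, (void *)"old state");
    /* ... and later point it at relocated data without re-registering. */
    if (RT_SUCCESS(rc))
        rc = RTStrFormatTypeSetUser("mytype", (void *)"new state");
    return rc;
}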
/**
 * Attaches a network filter driver to a bandwidth group.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM structure.
 * @param   pDrvIns     The driver instance.
 * @param   pszBwGroup  Name of the bandwidth group to attach to.
 * @param   pFilter     Pointer to the filter we attach.
 */
VMMR3_INT_DECL(int) PDMR3NsAttach(PUVM pUVM, PPDMDRVINS pDrvIns, const char *pszBwGroup, PPDMNSFILTER pFilter)
{
    VM_ASSERT_EMT(pUVM->pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
    AssertReturn(pFilter->pBwGroupR3 == NULL, VERR_ALREADY_EXISTS);
    RT_NOREF_PV(pDrvIns);

    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
    LOCK_NETSHAPER_RETURN(pShaper);

    int             rc          = VINF_SUCCESS;
    PPDMNSBWGROUP   pBwGroupNew = NULL;
    if (pszBwGroup)
    {
        pBwGroupNew = pdmNsBwGroupFindById(pShaper, pszBwGroup);
        if (pBwGroupNew)
            pdmNsBwGroupRef(pBwGroupNew);
        else
            rc = VERR_NOT_FOUND;
    }

    if (RT_SUCCESS(rc))
    {
        PPDMNSBWGROUP pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
        ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pUVM->pVM, pBwGroupNew));
        if (pBwGroupOld)
            pdmNsBwGroupUnref(pBwGroupOld);
        pdmNsFilterLink(pFilter);
    }

    UNLOCK_NETSHAPER(pShaper);
    return rc;
}
static NTSTATUS vboxNewProtDeviceRemoved(PVBOXMOUSE_DEVEXT pDevExt)
{
    if (!vboxNewProtIsEnabled())
    {
        WARN(("New Protocol is disabled"));
        return STATUS_UNSUCCESSFUL;
    }

    KIRQL Irql;
    NTSTATUS Status = STATUS_SUCCESS;
    KeAcquireSpinLock(&g_ctx.SyncLock, &Irql);
    RemoveEntryList(&pDevExt->ListEntry);
    if (g_ctx.pCurrentDevExt == pDevExt)
    {
        ObDereferenceObject(pDevExt->pdoSelf);
        g_ctx.pCurrentDevExt = NULL;
        for (PLIST_ENTRY pCur = g_ctx.DevExtList.Flink; pCur != &g_ctx.DevExtList; pCur = pCur->Flink)
        {
            PVBOXMOUSE_DEVEXT pNewCurDevExt = PVBOXMOUSE_DEVEXT_FROM_LE(pCur);
            ASMAtomicWritePtr(&g_ctx.pCurrentDevExt, pNewCurDevExt);
            /* ensure the object is not deleted while it is being used by a poller thread */
            ObReferenceObject(pNewCurDevExt->pdoSelf);
            break;
        }
    }
    KeReleaseSpinLock(&g_ctx.SyncLock, Irql);

    return Status;
}
int pdmacFileAioMgrAddEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    LogFlowFunc(("pAioMgr=%#p pEndpoint=%#p{%s}\n", pAioMgr, pEndpoint, pEndpoint->Core.pszUri));

    /* Update the assigned I/O manager. */
    ASMAtomicWritePtr(&pEndpoint->pAioMgr, pAioMgr);

    int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
    AssertRCReturn(rc, rc);

    ASMAtomicWritePtr(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint, pEndpoint);
    rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_ADD_ENDPOINT);
    ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.AddEndpoint.pEndpoint);

    RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
    return rc;
}
/**
 * Inserts the per thread data structure into the tree.
 *
 * This can be called from both the thread itself and the parent,
 * thus it must handle insertion failures in a nice manner.
 *
 * @param   pThread         Pointer to thread structure allocated by rtThreadAlloc().
 * @param   NativeThread    The native thread id.
 */
DECLHIDDEN(void) rtThreadInsert(PRTTHREADINT pThread, RTNATIVETHREAD NativeThread)
{
    Assert(pThread);
    Assert(pThread->u32Magic == RTTHREADINT_MAGIC);

    {
        RT_THREAD_LOCK_RW();

        /*
         * Do not insert a terminated thread.
         *
         * This may happen if the thread finishes before the RTThreadCreate call
         * gets this far.  Since the OS may quickly reuse the native thread ID
         * it should not be reinserted at this point.
         */
        if (rtThreadGetState(pThread) != RTTHREADSTATE_TERMINATED)
        {
            /*
             * Before inserting we must check if there is a thread with this id
             * in the tree already.  We're racing parent and child on insert here
             * so that the handle is valid in both ends when they return / start.
             *
             * If it's not ourself we find, it's a dead alien thread and we will
             * unlink it from the tree.  Alien threads will be released at this point.
             */
            PRTTHREADINT pThreadOther = (PRTTHREADINT)RTAvlPVGet(&g_ThreadTree, (void *)NativeThread);
            if (pThreadOther != pThread)
            {
                bool fRc;

                /* remove dead alien if any */
                if (pThreadOther)
                {
                    AssertMsg(pThreadOther->fIntFlags & RTTHREADINT_FLAGS_ALIEN,
                              ("%p:%s; %p:%s\n", pThread, pThread->szName, pThreadOther, pThreadOther->szName));
                    ASMAtomicBitClear(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE_BIT);
                    rtThreadRemoveLocked(pThreadOther);
                    if (pThreadOther->fIntFlags & RTTHREADINT_FLAGS_ALIEN)
                        rtThreadRelease(pThreadOther);
                }

                /* insert the thread */
                ASMAtomicWritePtr(&pThread->Core.Key, (void *)NativeThread);
                fRc = RTAvlPVInsert(&g_ThreadTree, &pThread->Core);
                ASMAtomicOrU32(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE);
                if (fRc)
                    ASMAtomicIncU32(&g_cThreadInTree);

                AssertReleaseMsg(fRc, ("Lock problem? %p (%RTnthrd) %s\n", pThread, NativeThread, pThread->szName));
                NOREF(fRc);
            }
        }

        RT_THREAD_UNLOCK_RW();
    }
}
/**
 * Destroys the per thread data.
 *
 * @param   pThread     The thread to destroy.
 */
static void rtThreadDestroy(PRTTHREADINT pThread)
{
    RTSEMEVENTMULTI hEvt1, hEvt2;

    /*
     * Remove it from the tree and mark it as dead.
     *
     * Threads that have seen rtThreadTerminate should already have been
     * removed from the tree, so there is probably no thread left that
     * requires removing here.  However, if we do remove one, make sure
     * that cRefs isn't 0, or we'll blow up because the strict locking
     * code will be calling us back.
     */
    if (ASMBitTest(&pThread->fIntFlags, RTTHREADINT_FLAG_IN_TREE_BIT))
    {
        ASMAtomicIncU32(&pThread->cRefs);
        rtThreadRemove(pThread);
        ASMAtomicDecU32(&pThread->cRefs);
    }

    /*
     * Invalidate the thread structure.
     */
#ifdef IN_RING3
    rtLockValidatorSerializeDestructEnter();

    rtLockValidatorDeletePerThread(&pThread->LockValidator);
#endif
#ifdef RT_WITH_ICONV_CACHE
    rtStrIconvCacheDestroy(pThread);
#endif

    ASMAtomicXchgU32(&pThread->u32Magic, RTTHREADINT_MAGIC_DEAD);
    ASMAtomicWritePtr(&pThread->Core.Key, (void *)NIL_RTTHREAD);
    pThread->enmType         = RTTHREADTYPE_INVALID;
    hEvt1                    = pThread->EventUser;
    pThread->EventUser       = NIL_RTSEMEVENTMULTI;
    hEvt2                    = pThread->EventTerminated;
    pThread->EventTerminated = NIL_RTSEMEVENTMULTI;

#ifdef IN_RING3
    rtLockValidatorSerializeDestructLeave();
#endif

    /*
     * Destroy semaphore resources and free the bugger.
     */
    RTSemEventMultiDestroy(hEvt1);
    if (hEvt2 != NIL_RTSEMEVENTMULTI)
        RTSemEventMultiDestroy(hEvt2);

    rtThreadNativeDestroy(pThread);
    RTMemFree(pThread);
}
static int pdmacFileAioMgrCloseEndpoint(PPDMACEPFILEMGR pAioMgr, PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    int rc = RTCritSectEnter(&pAioMgr->CritSectBlockingEvent);
    AssertRCReturn(rc, rc);

    ASMAtomicWritePtr(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint, pEndpoint);
    rc = pdmacFileAioMgrWaitForBlockingEvent(pAioMgr, PDMACEPFILEAIOMGRBLOCKINGEVENT_CLOSE_ENDPOINT);
    ASMAtomicWriteNullPtr(&pAioMgr->BlockingEventData.CloseEndpoint.pEndpoint);

    RTCritSectLeave(&pAioMgr->CritSectBlockingEvent);
    return rc;
}
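/*
 * Hedged sketch of the handshake pattern the two wrappers above rely on: the
 * caller publishes a payload pointer, wakes the I/O manager thread, and blocks
 * until the worker acknowledges.  All names in this sketch are illustrative;
 * the real pdmacFileAioMgrWaitForBlockingEvent implementation may differ.
 */
typedef struct BLOCKINGEVENTSKETCH
{
    void * volatile pvData;      /* published payload, cleared by the caller afterwards */
    RTSEMEVENT      hEvtWakeup;  /* wakes the worker thread */
    RTSEMEVENT      hEvtAck;     /* signalled by the worker when the event was processed */
} BLOCKINGEVENTSKETCH;

static int blockingEventSendSketch(BLOCKINGEVENTSKETCH *pEvt, void *pvData)
{
    ASMAtomicWritePtr(&pEvt->pvData, pvData);                   /* publish the payload */
    int rc = RTSemEventSignal(pEvt->hEvtWakeup);                /* wake the worker */
    if (RT_SUCCESS(rc))
        rc = RTSemEventWait(pEvt->hEvtAck, RT_INDEFINITE_WAIT); /* wait for the acknowledgement */
    ASMAtomicWriteNullPtr(&pEvt->pvData);                       /* tear down, like the wrappers above */
    return rc;
}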
/**
 * Recycles a request.
 *
 * @returns true if recycled, false if it should be freed.
 * @param   pQueue      The queue.
 * @param   pReq        The request.
 */
DECLHIDDEN(bool) rtReqQueueRecycle(PRTREQQUEUEINT pQueue, PRTREQINT pReq)
{
    if (   !pQueue
        || pQueue->cReqFree >= 128)
        return false;

    ASMAtomicIncU32(&pQueue->cReqFree);
    PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)];
    PRTREQ pNext;
    do
    {
        pNext = *ppHead;
        ASMAtomicWritePtr(&pReq->pNext, pNext);
    } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext));

    return true;
}
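/*
 * Hedged counterpart sketch (not the actual RTReqQueueAlloc code): pop a
 * recycled request from the per-slot free lists that rtReqQueueRecycle fills
 * above.  The sketch assumes a single consuming thread, which keeps the naive
 * compare-and-exchange pop free of ABA hazards; concurrent consumers would
 * need to detach the whole list with an atomic exchange instead.
 */
static PRTREQINT rtReqQueuePopRecycledSketch(PRTREQQUEUEINT pQueue)
{
    for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pQueue->apReqFree); iSlot++)
    {
        PRTREQ volatile *ppHead = &pQueue->apReqFree[iSlot];
        PRTREQ pReq;
        do
        {
            pReq = *ppHead;
            if (!pReq)
                break;                              /* slot empty, try the next one */
        } while (!ASMAtomicCmpXchgPtr(ppHead, pReq->pNext, pReq));

        if (pReq)
        {
            ASMAtomicDecU32(&pQueue->cReqFree);
            pReq->pNext = NULL;
            return (PRTREQINT)pReq;
        }
    }
    return NULL;                                    /* nothing cached; allocate a fresh request */
}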
/**
 * Resolves a symbol.
 *
 * On success the symbol address is stored at *ppfn; on failure *ppfn is left
 * untouched.
 *
 * @param   pHandle     The IDC handle.
 * @param   ppfn        Where to store the symbol address on success.
 * @param   pszName     The name of the symbol.
 */
static void supR0IdcGetSymbol(PSUPDRVIDCHANDLE pHandle, PFNRT *ppfn, const char *pszName)
{
    SUPDRVIDCREQGETSYM Req;
    int rc;

    /*
     * Create and send a get symbol request.
     */
    Req.Hdr.cb         = sizeof(Req);
    Req.Hdr.rc         = VERR_WRONG_ORDER;
    Req.Hdr.pSession   = pHandle->s.pSession;
    Req.u.In.pszSymbol = pszName;
    Req.u.In.pszModule = NULL;
    rc = supR0IdcNativeCall(pHandle, SUPDRV_IDC_REQ_GET_SYMBOL, &Req.Hdr);
    if (RT_SUCCESS(rc))
        ASMAtomicWritePtr((void * volatile *)ppfn, Req.u.Out.pfnSymbol);
}
VMMR3DECL(int) PDMR3NsAttach(PVM pVM, PPDMDRVINS pDrvIns, const char *pcszBwGroup, PPDMNSFILTER pFilter)
{
    VM_ASSERT_EMT(pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
    AssertReturn(pFilter->pBwGroupR3 == NULL, VERR_ALREADY_EXISTS);

    PUVM          pUVM        = pVM->pUVM;
    PPDMNETSHAPER pShaper     = pUVM->pdm.s.pNetShaper;
    PPDMNSBWGROUP pBwGroupOld = NULL;
    PPDMNSBWGROUP pBwGroupNew = NULL;

    int rc = RTCritSectEnter(&pShaper->cs);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        if (pcszBwGroup)
        {
            pBwGroupNew = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
            if (pBwGroupNew)
                pdmNsBwGroupRef(pBwGroupNew);
            else
                rc = VERR_NOT_FOUND;
        }

        if (RT_SUCCESS(rc))
        {
            pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
            ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pVM, pBwGroupNew));
            if (pBwGroupOld)
                pdmNsBwGroupUnref(pBwGroupOld);
            pdmNsFilterLink(pFilter);
        }

        int rc2 = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc2);
    }

    return rc;
}
RTDECL(int) RTFileAioReqCancel(RTFILEAIOREQ hReq)
{
    PRTFILEAIOREQINTERNAL pReqInt = hReq;
    RTFILEAIOREQ_VALID_RETURN(pReqInt);
    RTFILEAIOREQ_STATE_RETURN_RC(pReqInt, SUBMITTED, VERR_FILE_AIO_NOT_SUBMITTED);

    ASMAtomicXchgBool(&pReqInt->fCanceled, true);

    int rcPosix = aio_cancel(pReqInt->AioCB.aio_fildes, &pReqInt->AioCB);

    if (rcPosix == AIO_CANCELED)
    {
        PRTFILEAIOCTXINTERNAL pCtxInt = pReqInt->pCtxInt;
        /*
         * Notify the waiting thread that the request was canceled.
         */
        AssertMsg(VALID_PTR(pCtxInt), ("Invalid state. Request was canceled but wasn't submitted\n"));

        Assert(!pCtxInt->pReqToCancel);
        ASMAtomicWritePtr(&pCtxInt->pReqToCancel, pReqInt);
        rtFileAioCtxWakeup(pCtxInt);

        /* Wait for acknowledge. */
        int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
        AssertRC(rc);

        ASMAtomicWriteNullPtr(&pCtxInt->pReqToCancel);
        pReqInt->Rc = VERR_FILE_AIO_CANCELED;
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
        return VINF_SUCCESS;
    }
    else if (rcPosix == AIO_ALLDONE)
        return VERR_FILE_AIO_COMPLETED;
    else if (rcPosix == AIO_NOTCANCELED)
        return VERR_FILE_AIO_IN_PROGRESS;
    else
        return RTErrConvertFromErrno(errno);
}
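/*
 * Hedged sketch of the acknowledging side of the cancel handshake above.  The
 * real code lives in the waiter (the counterpart of rtFileAioCtxWakeup inside
 * the wait routine) and also hands the canceled request back to the caller;
 * this only shows the minimal ack step and is not the actual implementation.
 */
static void rtFileAioCtxAckCancelSketch(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    PRTFILEAIOREQINTERNAL pReqToCancel = ASMAtomicReadPtrT(&pCtxInt->pReqToCancel, PRTFILEAIOREQINTERNAL);
    if (pReqToCancel)
    {
        /* Let RTFileAioReqCancel return; it will clear pReqToCancel itself. */
        int rc = RTSemEventSignal(pCtxInt->SemEventCancel);
        AssertRC(rc);
    }
}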
static NTSTATUS vboxNewProtDeviceAdded(PVBOXMOUSE_DEVEXT pDevExt)
{
    if (!vboxNewProtIsEnabled())
    {
        WARN(("New Protocol is disabled"));
        return STATUS_UNSUCCESSFUL;
    }

    NTSTATUS Status = STATUS_SUCCESS;
    KIRQL Irql;
    KeAcquireSpinLock(&g_ctx.SyncLock, &Irql);
    InsertHeadList(&g_ctx.DevExtList, &pDevExt->ListEntry);
    if (!g_ctx.pCurrentDevExt)
    {
        ASMAtomicWritePtr(&g_ctx.pCurrentDevExt, pDevExt);
        /* ensure the object is not deleted while it is being used by a poller thread */
        ObReferenceObject(pDevExt->pdoSelf);
    }
    KeReleaseSpinLock(&g_ctx.SyncLock, Irql);

    return Status;
}
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value.
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }

                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];

            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
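/*
 * Hedged usage sketch (not from the original file): submit a single read
 * request through the context above and wait for it to complete.  hFile,
 * hAioCtx and hReq are assumed to have been created elsewhere, and the
 * RTFileAioCtxWait / RTFileAioReqGetRC signatures are as assumed here.
 */
static int fileAioReadExampleSketch(RTFILE hFile, RTFILEAIOCTX hAioCtx, RTFILEAIOREQ hReq,
                                    void *pvBuf, size_t cbRead)
{
    /* Prepare a read at offset 0 and hand it to the context. */
    int rc = RTFileAioReqPrepareRead(hReq, hFile, 0 /*off*/, pvBuf, cbRead, NULL /*pvUser*/);
    if (RT_SUCCESS(rc))
        rc = RTFileAioCtxSubmit(hAioCtx, &hReq, 1);
    if (RT_SUCCESS(rc))
    {
        /* Block until the request shows up as completed. */
        RTFILEAIOREQ hReqCompleted;
        uint32_t     cCompleted = 0;
        rc = RTFileAioCtxWait(hAioCtx, 1 /*cMinReqs*/, RT_INDEFINITE_WAIT,
                              &hReqCompleted, 1 /*cReqs*/, &cCompleted);
        if (RT_SUCCESS(rc) && cCompleted == 1)
        {
            size_t cbTransferred = 0;
            rc = RTFileAioReqGetRC(hReqCompleted, &cbTransferred);
        }
    }
    return rc;
}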
/**
 * Called the first time somebody asks for the time or when the GIP
 * is mapped/unmapped.
 */
static DECLCALLBACK(uint64_t) rtTimeNanoTSInternalRediscover(PRTTIMENANOTSDATA pData)
{
    PSUPGLOBALINFOPAGE pGip = g_pSUPGlobalInfoPage;
# ifdef IN_RC
    uint32_t iWorker;
# else
    PFNTIMENANOTSINTERNAL pfnWorker;
# endif
    if (   pGip
        && pGip->u32Magic == SUPGLOBALINFOPAGE_MAGIC
        && (   pGip->u32Mode == SUPGIPMODE_INVARIANT_TSC
            || pGip->u32Mode == SUPGIPMODE_SYNC_TSC
            || pGip->u32Mode == SUPGIPMODE_ASYNC_TSC))
    {
        if (ASMCpuId_EDX(1) & X86_CPUID_FEATURE_EDX_SSE2)
        {
# ifdef IN_RC
            iWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                    ? RTTIMENANO_WORKER_LFENCE_ASYNC
                    : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                    ? RTTIMENANO_WORKER_LFENCE_SYNC_INVAR_NO_DELTA
                    : RTTIMENANO_WORKER_LFENCE_SYNC_INVAR_WITH_DELTA;
# elif defined(IN_RING0)
            pfnWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                      ? RTTimeNanoTSLFenceAsync
                      : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                      ? RTTimeNanoTSLFenceSyncInvarNoDelta
                      : RTTimeNanoTSLFenceSyncInvarWithDelta;
# else
            if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLFenceAsyncUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLFenceAsyncUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? RTTimeNanoTSLFenceAsyncUseApicId
                          : rtTimeNanoTSInternalFallback;
            else
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                            ? RTTimeNanoTSLFenceSyncInvarNoDelta
                            : RTTimeNanoTSLFenceSyncInvarWithDeltaUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                            ? RTTimeNanoTSLFenceSyncInvarNoDelta
                            : RTTimeNanoTSLFenceSyncInvarWithDeltaUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                            ? RTTimeNanoTSLFenceSyncInvarNoDelta
                            : RTTimeNanoTSLFenceSyncInvarWithDeltaUseApicId
                          : rtTimeNanoTSInternalFallback;
# endif
        }
        else
        {
# ifdef IN_RC
            iWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                    ? RTTIMENANO_WORKER_LEGACY_ASYNC
                    : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                    ? RTTIMENANO_WORKER_LEGACY_SYNC_INVAR_NO_DELTA
                    : RTTIMENANO_WORKER_LEGACY_SYNC_INVAR_WITH_DELTA;
# elif defined(IN_RING0)
            pfnWorker = pGip->u32Mode == SUPGIPMODE_ASYNC_TSC
                      ? RTTimeNanoTSLegacyAsync
                      : pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                      ? RTTimeNanoTSLegacySyncInvarNoDelta
                      : RTTimeNanoTSLegacySyncInvarWithDelta;
# else
            if (pGip->u32Mode == SUPGIPMODE_ASYNC_TSC)
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLegacyAsyncUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? RTTimeNanoTSLegacyAsyncUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? RTTimeNanoTSLegacyAsyncUseApicId
                          : rtTimeNanoTSInternalFallback;
            else
                pfnWorker = pGip->fGetGipCpu & SUPGIPGETCPU_RDTSCP_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                            ? RTTimeNanoTSLegacySyncInvarNoDelta
                            : RTTimeNanoTSLegacySyncInvarWithDeltaUseRdtscp
                          : pGip->fGetGipCpu & SUPGIPGETCPU_IDTR_LIMIT_MASK_MAX_SET_CPUS
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_PRACTICALLY_ZERO
                            ? RTTimeNanoTSLegacySyncInvarNoDelta
                            : RTTimeNanoTSLegacySyncInvarWithDeltaUseIdtrLim
                          : pGip->fGetGipCpu & SUPGIPGETCPU_APIC_ID
                          ? pGip->enmUseTscDelta <= SUPGIPUSETSCDELTA_ROUGHLY_ZERO
                            ? RTTimeNanoTSLegacySyncInvarNoDelta
                            : RTTimeNanoTSLegacySyncInvarWithDeltaUseApicId
                          : rtTimeNanoTSInternalFallback;
# endif
        }
    }
    else
# ifdef IN_RC
        iWorker = RTTIMENANO_WORKER_FALLBACK;
# else
        pfnWorker = rtTimeNanoTSInternalFallback;
# endif

# ifdef IN_RC
    ASMAtomicWriteU32((uint32_t volatile *)&g_iWorker, iWorker);
    return g_apfnWorkers[iWorker](pData);
# else
    ASMAtomicWritePtr((void * volatile *)&g_pfnWorker, (void *)(uintptr_t)pfnWorker);
    return pfnWorker(pData);
# endif
}
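/*
 * Hedged sketch of how the worker cached above is typically consumed on later
 * calls (illustrative only; the real dispatcher and the g_TimeNanoTSData name
 * may differ between ring-0, ring-3 and raw-mode builds).
 */
DECLINLINE(uint64_t) rtTimeNanoTSDispatchSketch(void)
{
# ifdef IN_RC
    /* Raw-mode context indexes a worker table instead of storing a pointer. */
    return g_apfnWorkers[g_iWorker](&g_TimeNanoTSData);
# else
    /* Everyone else calls the cached worker directly; the first call lands in
       rtTimeNanoTSInternalRediscover above, which replaces itself. */
    return g_pfnWorker(&g_TimeNanoTSData);
# endif
}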