/**
 * Thread checking for expired requests.
 *
 * @returns IPRT status code.
 * @param   pThread    Thread handle.
 * @param   pvUser     Opaque user data.
 */
static int drvdiskIntIoReqExpiredCheck(RTTHREAD pThread, void *pvUser)
{
    PDRVDISKINTEGRITY pThis = (PDRVDISKINTEGRITY)pvUser;

    while (pThis->fRunning)
    {
        int rc = RTSemEventWait(pThis->SemEvent, pThis->uCheckIntervalMs);
        if (!pThis->fRunning)
            break;
        Assert(rc == VERR_TIMEOUT);

        /* Get current timestamp for comparison. */
        uint64_t tsCurr = RTTimeSystemMilliTS();

        /* Go through the array and check for expired requests. */
        for (unsigned i = 0; i < RT_ELEMENTS(pThis->apReqActive); i++)
        {
            PDRVDISKAIOREQACTIVE pReqActive = &pThis->apReqActive[i];
            PDRVDISKAIOREQ pIoReq = ASMAtomicReadPtrT(&pReqActive->pIoReq, PDRVDISKAIOREQ);

            if (   pIoReq
                && (tsCurr > pReqActive->tsStart)
                && (tsCurr - pReqActive->tsStart) >= pThis->uExpireIntervalMs)
            {
                RTMsgError("Request %#p expired (active for %llu ms already)\n",
                           pIoReq, tsCurr - pReqActive->tsStart);
                RTAssertDebugBreak();
            }
        }
    }

    return VINF_SUCCESS;
}
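/*
 * The watchdog above only ever takes an atomic snapshot of each slot
 * pointer and never dereferences the request itself, so it can race
 * freely against the I/O path that publishes and clears the slots.
 * Below is a minimal standalone sketch of that scan using C11 atomics
 * in place of ASMAtomicReadPtrT; the ActiveSlot type and all names are
 * hypothetical stand-ins, not IPRT APIs.
 */
#include <stdatomic.h>
#include <stdint.h>
#include <stdio.h>

/* Hypothetical slot type mirroring DRVDISKAIOREQACTIVE. */
typedef struct ActiveSlot
{
    _Atomic(void *) pReq;      /* NULL while the slot is unused */
    uint64_t        tsStartMs; /* set before pReq is published  */
} ActiveSlot;

/* Scan a fixed-size array of active slots and report overdue requests. */
static void scanExpired(ActiveSlot *paSlots, unsigned cSlots, uint64_t tsNowMs, uint64_t cMsExpire)
{
    for (unsigned i = 0; i < cSlots; i++)
    {
        void *pReq = atomic_load(&paSlots[i].pReq); /* snapshot; never dereferenced */
        if (   pReq
            && tsNowMs > paSlots[i].tsStartMs
            && tsNowMs - paSlots[i].tsStartMs >= cMsExpire)
            fprintf(stderr, "Request %p expired (active for %llu ms)\n",
                    pReq, (unsigned long long)(tsNowMs - paSlots[i].tsStartMs));
    }
}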
/**
 * Allocates and acquires the lock for the stream.
 *
 * @returns IPRT status code.
 * @param   pStream    The stream (valid).
 */
static int rtStrmAllocLock(PRTSTREAM pStream)
{
    Assert(pStream->pCritSect == NULL);

    PRTCRITSECT pCritSect = (PRTCRITSECT)RTMemAlloc(sizeof(*pCritSect));
    if (!pCritSect)
        return VERR_NO_MEMORY;

    /* The native stream lock is normally not recursive. */
    int rc = RTCritSectInitEx(pCritSect, RTCRITSECT_FLAGS_NO_NESTING,
                              NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemSpinMutex");
    if (RT_SUCCESS(rc))
    {
        rc = RTCritSectEnter(pCritSect);
        if (RT_SUCCESS(rc))
        {
            if (RT_LIKELY(ASMAtomicCmpXchgPtr(&pStream->pCritSect, pCritSect, NULL)))
                return VINF_SUCCESS;

            RTCritSectLeave(pCritSect);
        }
        RTCritSectDelete(pCritSect);
    }
    RTMemFree(pCritSect);

    /* Handle the lost race case... */
    pCritSect = ASMAtomicReadPtrT(&pStream->pCritSect, PRTCRITSECT);
    if (pCritSect)
        return RTCritSectEnter(pCritSect);

    return rc;
}
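/*
 * The interesting part is the publication step: the lock is created and
 * entered speculatively, and ASMAtomicCmpXchgPtr only installs it if no
 * other thread got there first; the loser tears its copy down and enters
 * the winner's lock instead.  A minimal sketch of the same lazy-
 * initialization race, assuming C11 atomics and a POSIX mutex standing
 * in for RTCRITSECT (all names here are illustrative):
 */
#include <stdatomic.h>
#include <stdlib.h>
#include <pthread.h>

typedef struct Stream { _Atomic(pthread_mutex_t *) pLock; } Stream; /* hypothetical */

static int streamAllocLock(Stream *pStream)
{
    pthread_mutex_t *pNew = malloc(sizeof(*pNew));
    if (!pNew)
        return -1;
    pthread_mutex_init(pNew, NULL);
    pthread_mutex_lock(pNew);                   /* enter before publishing */

    pthread_mutex_t *pExpected = NULL;
    if (atomic_compare_exchange_strong(&pStream->pLock, &pExpected, pNew))
        return 0;                               /* installed; we already hold it */

    /* Lost the race: discard our copy and enter the winner's lock. */
    pthread_mutex_unlock(pNew);
    pthread_mutex_destroy(pNew);
    free(pNew);
    return pthread_mutex_lock(pExpected);
}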
static DECLCALLBACK(void) pdmacR3TimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
{
    uint64_t tsCur = RTTimeProgramMilliTS();
    uint64_t cMilliesNext = UINT64_MAX;
    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser;

    ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, UINT64_MAX);

    /* Go through all endpoints and check for expired requests. */
    PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead;

    while (pEpFile)
    {
        /* Check for an expired delay. */
        if (pEpFile->pDelayedHead != NULL)
        {
            PPDMASYNCCOMPLETIONTASKFILE pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pDelayedHead, NULL, PPDMASYNCCOMPLETIONTASKFILE);

            while (pTaskFile)
            {
                PPDMASYNCCOMPLETIONTASKFILE pTmp = pTaskFile;
                pTaskFile = pTaskFile->pDelayedNext;

                if (tsCur >= pTmp->tsDelayEnd)
                {
                    LogRel(("AIOMgr: Delayed request %#p completed\n", pTmp));
                    pdmR3AsyncCompletionCompleteTask(&pTmp->Core, pTmp->rc, true);
                }
                else
                {
                    /* Prepend to the delayed list again. */
                    PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;

                    if (pTmp->tsDelayEnd - tsCur < cMilliesNext)
                        cMilliesNext = pTmp->tsDelayEnd - tsCur;

                    do
                    {
                        pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
                        pTmp->pDelayedNext = pHead;
                    } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTmp, pHead));
                }
            }
        }

        pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext;
    }

    if (cMilliesNext < pEpClassFile->cMilliesNext)
    {
        ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, cMilliesNext);
        TMTimerSetMillies(pEpClassFile->pTimer, cMilliesNext);
    }
}
int pdmacFileEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
{
    PPDMACTASKFILE pNext;
    do
    {
        pNext = pEndpoint->pTasksNewHead;
        pTask->pNext = pNext;
    } while (!ASMAtomicCmpXchgPtr(&pEndpoint->pTasksNewHead, pTask, pNext));

    pdmacFileAioMgrWakeup(ASMAtomicReadPtrT(&pEndpoint->pAioMgr, PPDMACEPFILEMGR));

    return VINF_SUCCESS;
}
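/*
 * This is the canonical lock-free LIFO push: read the head, point the
 * new element at it, and retry the compare-and-swap until no other
 * producer slipped in between.  The same loop in portable C11 form, for
 * reference (the Task type is a hypothetical stand-in):
 */
#include <stdatomic.h>

typedef struct Task { struct Task *pNext; } Task;

/* Push pTask onto a shared LIFO head; safe against concurrent pushers. */
static void taskListPush(_Atomic(Task *) *ppHead, Task *pTask)
{
    Task *pHead = atomic_load(ppHead);
    do
        pTask->pNext = pHead;   /* link against the head we saw */
    while (!atomic_compare_exchange_weak(ppHead, &pHead, pTask));
    /* On failure the CAS reloads pHead, so the loop relinks and retries. */
}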
RTDECL(bool) RTReqQueueIsBusy(RTREQQUEUE hQueue)
{
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, false);

    if (ASMAtomicReadBool(&pQueue->fBusy))
        return true;
    if (ASMAtomicReadPtrT(&pQueue->pReqs, PRTREQ) != NULL)
        return true;
    if (ASMAtomicReadBool(&pQueue->fBusy))
        return true;
    return false;
}
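/*
 * Note the order of the three reads: a request can move from the pReqs
 * list into processing between the first two loads, so the busy flag is
 * re-read afterwards to close that window.  A condensed C11 sketch of
 * the same check, with hypothetical field names:
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stddef.h>

typedef struct Req Req;
typedef struct Queue
{
    _Atomic bool   fBusy; /* set while a worker processes a request */
    _Atomic(Req *) pReqs; /* pending request list                   */
} Queue;

static bool queueIsBusy(Queue *pQueue)
{
    if (atomic_load(&pQueue->fBusy))
        return true;
    if (atomic_load(&pQueue->pReqs) != NULL)
        return true;
    /* A request may have left pReqs and entered processing between the
       two loads above, so check the busy flag once more. */
    return atomic_load(&pQueue->fBusy);
}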
/**
 * Obtain bandwidth in a bandwidth group.
 *
 * @returns True if bandwidth was allocated, false if not.
 * @param   pFilter       Pointer to the filter that allocates bandwidth.
 * @param   cbTransfer    Number of bytes to allocate.
 */
VMMDECL(bool) PDMNsAllocateBandwidth(PPDMNSFILTER pFilter, size_t cbTransfer)
{
    AssertPtrReturn(pFilter, true);
    if (!VALID_PTR(pFilter->CTX_SUFF(pBwGroup)))
        return true;

    PPDMNSBWGROUP pBwGroup = ASMAtomicReadPtrT(&pFilter->CTX_SUFF(pBwGroup), PPDMNSBWGROUP);
    int rc = PDMCritSectEnter(&pBwGroup->Lock, VERR_SEM_BUSY);
    AssertRC(rc);
    if (RT_UNLIKELY(rc == VERR_SEM_BUSY))
        return true;

    bool fAllowed = true;
    if (pBwGroup->cbPerSecMax)
    {
        /* Re-fill the bucket first. */
        uint64_t tsNow = RTTimeSystemNanoTS();
        uint32_t uTokensAdded = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbPerSecMax / (1000 * 1000 * 1000);
        uint32_t uTokens = RT_MIN(pBwGroup->cbBucket, uTokensAdded + pBwGroup->cbTokensLast);

        if (cbTransfer > uTokens)
        {
            fAllowed = false;
            ASMAtomicWriteBool(&pFilter->fChoked, true);
        }
        else
        {
            pBwGroup->tsUpdatedLast = tsNow;
            pBwGroup->cbTokensLast = uTokens - (uint32_t)cbTransfer;
        }

        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} cbTransfer=%u uTokens=%u uTokensAdded=%u fAllowed=%RTbool\n",
              pBwGroup, R3STRING(pBwGroup->pszNameR3), cbTransfer, uTokens, uTokensAdded, fAllowed));
    }
    else
        Log2(("pdmNsAllocateBandwidth: BwGroup=%#p{%s} disabled fAllowed=%RTbool\n",
              pBwGroup, R3STRING(pBwGroup->pszNameR3), fAllowed));

    rc = PDMCritSectLeave(&pBwGroup->Lock);
    AssertRC(rc);
    return fAllowed;
}
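/*
 * The throttling logic above is a classic token bucket: tokens accrue at
 * cbPerSecMax bytes per second up to the bucket capacity, and a transfer
 * is allowed only if it can be paid for in full.  Notably, tsUpdatedLast
 * is only advanced on a successful allocation, so a denied request keeps
 * accruing credit.  A self-contained sketch of that refill-and-debit
 * step follows; the TokenBucket type and names are hypothetical, and as
 * in the original the caller is assumed to serialize access (the VBox
 * code holds the group's critical section).
 */
#include <stdbool.h>
#include <stdint.h>

typedef struct TokenBucket
{
    uint64_t tsLastNs;  /* timestamp of the last successful debit */
    uint32_t cbTokens;  /* tokens left after the last debit       */
    uint32_t cbBucket;  /* capacity, i.e. the maximum burst       */
    uint64_t cbPerSec;  /* refill rate in bytes per second        */
} TokenBucket;

/* Refill from elapsed time, then try to debit cbTransfer tokens. */
static bool tokenBucketAlloc(TokenBucket *pBucket, uint64_t tsNowNs, uint32_t cbTransfer)
{
    uint64_t cAdded  = (tsNowNs - pBucket->tsLastNs) * pBucket->cbPerSec / UINT64_C(1000000000);
    uint64_t cAvail  = cAdded + pBucket->cbTokens;
    uint32_t cTokens = cAvail > pBucket->cbBucket ? pBucket->cbBucket : (uint32_t)cAvail;

    if (cbTransfer > cTokens)
        return false;            /* deny; tsLastNs stays put so credit keeps growing */

    pBucket->tsLastNs = tsNowNs;
    pBucket->cbTokens = cTokens - cbTransfer;
    return true;
}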
VMMR3DECL(bool) PDMR3NsAllocateBandwidth(PPDMNSFILTER pFilter, uint32_t cbTransfer)
{
    AssertPtrReturn(pFilter, true);
    if (!VALID_PTR(pFilter->pBwGroupR3))
        return true;

    PPDMNSBWGROUP pBwGroup = ASMAtomicReadPtrT(&pFilter->pBwGroupR3, PPDMNSBWGROUP);
    int rc = RTCritSectEnter(&pBwGroup->cs);
    AssertRC(rc);

    bool fAllowed = true;
    if (pBwGroup->cbTransferPerSecMax)
    {
        /* Re-fill the bucket first. */
        uint64_t tsNow = RTTimeSystemNanoTS();
        uint32_t uTokensAdded = (tsNow - pBwGroup->tsUpdatedLast) * pBwGroup->cbTransferPerSecMax / (1000 * 1000 * 1000);
        uint32_t uTokens = RT_MIN(pBwGroup->cbBucketSize, uTokensAdded + pBwGroup->cbTokensLast);

        if (cbTransfer > uTokens)
        {
            fAllowed = false;
            ASMAtomicWriteBool(&pFilter->fChoked, true);
        }
        else
        {
            pBwGroup->tsUpdatedLast = tsNow;
            pBwGroup->cbTokensLast = uTokens - cbTransfer;
        }

        Log2((LOG_FN_FMT "BwGroup=%#p{%s} cbTransfer=%u uTokens=%u uTokensAdded=%u fAllowed=%RTbool\n",
              __PRETTY_FUNCTION__, pBwGroup, pBwGroup->pszName, cbTransfer, uTokens, uTokensAdded, fAllowed));
    }
    else
        Log2((LOG_FN_FMT "BwGroup=%#p{%s} disabled fAllowed=%RTbool\n",
              __PRETTY_FUNCTION__, pBwGroup, pBwGroup->pszName, fAllowed));

    rc = RTCritSectLeave(&pBwGroup->cs);
    AssertRC(rc);
    return fAllowed;
}
void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
{
    PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser;

    LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc));

    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true);
    else
    {
        Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0);
        uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg);

        /* The first error will be returned. */
        if (RT_FAILURE(rc))
            ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
#ifdef VBOX_WITH_DEBUGGER
        else
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;

            /* Overwrite with injected error code. */
            if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS);
            else
                rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS);

            if (RT_FAILURE(rc))
                ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
        }
#endif

        if (   !(uOld - pTask->DataSeg.cbSeg)
            && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true))
        {
#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;
            PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEpFile->Core.pEpClass;

            /* Check if we should delay completion of the request. */
            if (   ASMAtomicReadU32(&pEpFile->msDelay) > 0
                && ASMAtomicReadU32(&pEpFile->cReqsDelay) > 0)
            {
                uint64_t tsDelay = pEpFile->msDelay;

                if (pEpFile->msJitter)
                    tsDelay = (RTRandU32() % 100) > 50
                            ? pEpFile->msDelay + (RTRandU32() % pEpFile->msJitter)
                            : pEpFile->msDelay - (RTRandU32() % pEpFile->msJitter);
                ASMAtomicDecU32(&pEpFile->cReqsDelay);

                /* Arm the delay. */
                pTaskFile->tsDelayEnd = RTTimeProgramMilliTS() + tsDelay;

                /* Append to the list. */
                PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;
                do
                {
                    pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
                    pTaskFile->pDelayedNext = pHead;
                } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTaskFile, pHead));

                if (tsDelay < pEpClassFile->cMilliesNext)
                {
                    ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, tsDelay);
                    TMTimerSetMillies(pEpClassFile->pTimer, tsDelay);
                }

                LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, tsDelay));
            }
            else
#endif
                pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
        }
    }
}
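/*
 * Three atomics coordinate the multi-part completion above: the
 * ASMAtomicSubS32 on cbTransferLeft decides which part finished last,
 * the CmpXchg on rc keeps only the first error, and the XchgBool on
 * fCompleted guarantees the completion path runs exactly once.  A
 * stripped-down C11 version of that protocol (types and names are
 * illustrative, not the PDM structures):
 */
#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct MultiPartTask
{
    _Atomic int32_t cbLeft;     /* bytes still outstanding                 */
    _Atomic int     rcFirst;    /* first failure; 0 while all succeeded    */
    _Atomic bool    fCompleted; /* set exactly once by the finishing part  */
} MultiPartTask;

/* Record one finished part; returns true for the caller that completes the task. */
static bool taskPartDone(MultiPartTask *pTask, int32_t cbPart, int rc)
{
    if (rc != 0)
    {
        int rcExpected = 0;     /* only the first error sticks */
        atomic_compare_exchange_strong(&pTask->rcFirst, &rcExpected, rc);
    }

    int32_t cbOld = atomic_fetch_sub(&pTask->cbLeft, cbPart);
    if (cbOld != cbPart)
        return false;           /* other parts are still outstanding */

    /* The exchange guards against two parts racing past zero. */
    return !atomic_exchange(&pTask->fCompleted, true);
}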
/**
 * Internal worker processing events and inserting new requests into the waiting list.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            PRTFILEAIOREQINTERNAL pReqHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[iSlot], NULL, PRTFILEAIOREQINTERNAL);

            while (   (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list. */
            if (pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                {
                    RTFIELAIOREQ_ASSERT_STATE(pReqHead->pNext, SUBMITTED);
                    pReqHead = pReqHead->pNext;
                }

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled. */
        PRTFILEAIOREQINTERNAL pReqToCancel = ASMAtomicReadPtrT(&pCtxInt->pReqToCancel, PRTFILEAIOREQINTERNAL);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still in the list because it is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Remove it from the waiting array. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled a request which is not in this context\n"));
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
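/*
 * rtFileAioCtxProcessEvents is the consumer half of the lock-free push
 * shown earlier: ASMAtomicXchgPtrT steals each per-slot list in a single
 * exchange, after which the worker can walk and relink it without any
 * further synchronization, because producers only ever touch the (now
 * empty) head.  A portable C11 sketch of that drain; handleRequest is a
 * hypothetical placeholder for the per-request work:
 */
#include <stdatomic.h>
#include <stddef.h>

typedef struct Req { struct Req *pNext; } Req;

void handleRequest(Req *pReq); /* hypothetical per-request worker */

/* Steal the whole pending list atomically, then process it privately. */
static void drainNewRequests(_Atomic(Req *) *ppHead)
{
    for (Req *pReq = atomic_exchange(ppHead, NULL); pReq; )
    {
        Req *pNext = pReq->pNext; /* save before the request is consumed */
        handleRequest(pReq);
        pReq = pNext;
    }
}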