PPDMACTASKFILE pdmacFileEpGetNewTasks(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    /*
     * Get pending tasks.
     */
    PPDMACTASKFILE pTasks = ASMAtomicXchgPtrT(&pEndpoint->pTasksNewHead, NULL, PPDMACTASKFILE);

    /* Reverse the list to process in FIFO order. */
    if (pTasks)
    {
        PPDMACTASKFILE pTask = pTasks;

        pTasks = NULL;

        while (pTask)
        {
            PPDMACTASKFILE pCur = pTask;
            pTask = pTask->pNext;
            pCur->pNext = pTasks;
            pTasks = pCur;
        }
    }

    return pTasks;
}
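The xchg-and-reverse consumer above only works because producers prepend new tasks onto pTasksNewHead LIFO-fashion with a compare-exchange loop. A minimal sketch of the matching lock-free push; the helper name is an illustrative assumption, not the actual VBox producer:

static void pdmacFileEpTaskPush(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
{
    PPDMACTASKFILE pHead;
    /* Prepend the task; retry if another producer or the consumer raced us. */
    do
    {
        pHead = ASMAtomicReadPtrT(&pEndpoint->pTasksNewHead, PPDMACTASKFILE);
        pTask->pNext = pHead;
    } while (!ASMAtomicCmpXchgPtr(&pEndpoint->pTasksNewHead, pTask, pHead));
}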
/**
 * Attach network filter driver from bandwidth group.
 *
 * @returns VBox status code.
 * @param   pUVM        The user mode VM structure.
 * @param   pDrvIns     The driver instance.
 * @param   pszBwGroup  Name of the bandwidth group to attach to.
 * @param   pFilter     Pointer to the filter we attach.
 */
VMMR3_INT_DECL(int) PDMR3NsAttach(PUVM pUVM, PPDMDRVINS pDrvIns, const char *pszBwGroup, PPDMNSFILTER pFilter)
{
    VM_ASSERT_EMT(pUVM->pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
    AssertReturn(pFilter->pBwGroupR3 == NULL, VERR_ALREADY_EXISTS);
    RT_NOREF_PV(pDrvIns);

    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
    LOCK_NETSHAPER_RETURN(pShaper);

    int           rc          = VINF_SUCCESS;
    PPDMNSBWGROUP pBwGroupNew = NULL;
    if (pszBwGroup)
    {
        pBwGroupNew = pdmNsBwGroupFindById(pShaper, pszBwGroup);
        if (pBwGroupNew)
            pdmNsBwGroupRef(pBwGroupNew);
        else
            rc = VERR_NOT_FOUND;
    }

    if (RT_SUCCESS(rc))
    {
        PPDMNSBWGROUP pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
        ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pUVM->pVM, pBwGroupNew));
        if (pBwGroupOld)
            pdmNsBwGroupUnref(pBwGroupOld);
        pdmNsFilterLink(pFilter);
    }

    UNLOCK_NETSHAPER(pShaper);
    return rc;
}
/*static*/ DECLCALLBACK(void)
VBoxDbgBase::atStateChange(PUVM pUVM, VMSTATE enmState, VMSTATE /*enmOldState*/, void *pvUser)
{
    VBoxDbgBase *pThis = (VBoxDbgBase *)pvUser;
    NOREF(pUVM);

    switch (enmState)
    {
        case VMSTATE_TERMINATED:
        {
            /** @todo need to do some locking here? */
            PUVM pUVM2 = ASMAtomicXchgPtrT(&pThis->m_pUVM, NULL, PUVM);
            if (pUVM2)
            {
                Assert(pUVM2 == pUVM);
                pThis->sigTerminated();
                VMR3ReleaseUVM(pUVM2);
            }
            break;
        }

        case VMSTATE_DESTROYING:
            pThis->sigDestroying();
            break;

        default:
            break;
    }
}
static DECLCALLBACK(void) pdmacR3TimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser)
{
    uint64_t tsCur        = RTTimeProgramMilliTS();
    uint64_t cMilliesNext = UINT64_MAX;
    PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser;

    ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, UINT64_MAX);

    /* Go through all endpoints and check for expired requests. */
    PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead;

    while (pEpFile)
    {
        /* Check for an expired delay. */
        if (pEpFile->pDelayedHead != NULL)
        {
            PPDMASYNCCOMPLETIONTASKFILE pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pDelayedHead, NULL, PPDMASYNCCOMPLETIONTASKFILE);

            while (pTaskFile)
            {
                PPDMASYNCCOMPLETIONTASKFILE pTmp = pTaskFile;
                pTaskFile = pTaskFile->pDelayedNext;

                if (tsCur >= pTmp->tsDelayEnd)
                {
                    LogRel(("AIOMgr: Delayed request %#p completed\n", pTmp));
                    pdmR3AsyncCompletionCompleteTask(&pTmp->Core, pTmp->rc, true);
                }
                else
                {
                    /* Prepend to the delayed list again. */
                    PPDMASYNCCOMPLETIONTASKFILE pHead = NULL;

                    if (pTmp->tsDelayEnd - tsCur < cMilliesNext)
                        cMilliesNext = pTmp->tsDelayEnd - tsCur;

                    do
                    {
                        pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE);
                        pTmp->pDelayedNext = pHead;
                    } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTmp, pHead));
                }
            }
        }

        pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext;
    }

    if (cMilliesNext < pEpClassFile->cMilliesNext)
    {
        ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, cMilliesNext);
        TMTimerSetMillies(pEpClassFile->pTimer, cMilliesNext);
    }
}
VBoxDbgBase::~VBoxDbgBase()
{
    /*
     * If the VM is still around.
     */
    /** @todo need to do some locking here? */
    PVM pVM = ASMAtomicXchgPtrT(&m_pVM, NULL, PVM);
    if (pVM)
    {
        int rc = VMR3AtStateDeregister(pVM, atStateChange, this);
        AssertRC(rc);
    }
}
RTDECL(int) RTReqQueueAlloc(RTREQQUEUE hQueue, RTREQTYPE enmType, PRTREQ *phReq)
{
    /*
     * Validate input.
     */
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE);
    AssertMsgReturn(enmType > RTREQTYPE_INVALID && enmType < RTREQTYPE_MAX, ("%d\n", enmType), VERR_RT_REQUEST_INVALID_TYPE);

    /*
     * Try get a recycled packet.
     *
     * While this could all be solved with a single list with a lock, it's a sport
     * of mine to avoid locks.
     */
    int cTries = RT_ELEMENTS(pQueue->apReqFree) * 2;
    while (--cTries >= 0)
    {
        PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)];
        PRTREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PRTREQ);
        if (pReq)
        {
            PRTREQ pNext = pReq->pNext;
            if (   pNext
                && !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL))
                vmr3ReqJoinFree(pQueue, pReq->pNext);
            ASMAtomicDecU32(&pQueue->cReqFree);

            Assert(pReq->uOwner.hQueue == pQueue);
            Assert(!pReq->fPoolOrQueue);

            int rc = rtReqReInit(pReq, enmType);
            if (RT_SUCCESS(rc))
            {
                *phReq = pReq;
                LogFlow(("RTReqQueueAlloc: returns VINF_SUCCESS *phReq=%p recycled\n", pReq));
                return VINF_SUCCESS;
            }
        }
    }

    /*
     * Ok, allocate a new one.
     */
    int rc = rtReqAlloc(enmType, false /*fPoolOrQueue*/, pQueue, phReq);
    LogFlow(("RTReqQueueAlloc: returns %Rrc *phReq=%p\n", rc, *phReq));
    return rc;
}
VMMR3DECL(int) PDMR3NsDetach(PVM pVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
{
    VM_ASSERT_EMT(pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
    AssertPtrReturn(pFilter->pBwGroupR3, VERR_INVALID_POINTER);

    PUVM          pUVM    = pVM->pUVM;
    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;

    int rc = RTCritSectEnter(&pShaper->cs);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        pdmNsFilterUnlink(pFilter);
        PPDMNSBWGROUP pBwGroup = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, NULL, PPDMNSBWGROUP);
        if (pBwGroup)
            pdmNsBwGroupUnref(pBwGroup);

        int rc2 = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc2);
    }
    return rc;
}
/**
 * Joins the list pList with whatever is linked up at *pHead.
 */
static void vmr3ReqJoinFreeSub(volatile PRTREQ *ppHead, PRTREQ pList)
{
    for (unsigned cIterations = 0;; cIterations++)
    {
        /* Optimistically plant our list as the new head. */
        PRTREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PRTREQ);
        if (!pHead)
            return;

        /* The exchange displaced a non-empty list: append our list to its
           tail and try to swap the combined list back in place of ours. */
        PRTREQ pTail = pHead;
        while (pTail->pNext)
            pTail = pTail->pNext;
        pTail->pNext = pList;
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList))
            return;

        /* The head changed under us: detach our list from the tail again and
           try to install the displaced list into a now-empty slot; failing
           that, retry with the combined list as the new input. */
        pTail->pNext = NULL;
        if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL))
            return;
        pList = pHead;
        Assert(cIterations != 32);
        Assert(cIterations != 64);
    }
}
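RTReqQueueAlloc above calls vmr3ReqJoinFree when it loses a race on a free slot; its body is not shown here. A plausible sketch, assuming it merely picks a slot in the same apReqFree ring and delegates to vmr3ReqJoinFreeSub (the slot-selection heuristic is illustrative, not the actual implementation):

static void vmr3ReqJoinFree(PRTREQQUEUEINT pQueue, PRTREQ pList)
{
    /* Pick a slot away from the one currently being allocated from, so the
       join is less likely to race RTReqQueueAlloc (heuristic only; the
       join works against any slot). */
    uint32_t i = (ASMAtomicUoReadU32(&pQueue->iReqFree) + RT_ELEMENTS(pQueue->apReqFree) / 2)
               % RT_ELEMENTS(pQueue->apReqFree);
    vmr3ReqJoinFreeSub(&pQueue->apReqFree[i], pList);
}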
VMMR3DECL(int) PDMR3NsAttach(PVM pVM, PPDMDRVINS pDrvIns, const char *pcszBwGroup, PPDMNSFILTER pFilter)
{
    VM_ASSERT_EMT(pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);
    AssertReturn(pFilter->pBwGroupR3 == NULL, VERR_ALREADY_EXISTS);

    PUVM          pUVM        = pVM->pUVM;
    PPDMNETSHAPER pShaper     = pUVM->pdm.s.pNetShaper;
    PPDMNSBWGROUP pBwGroupOld = NULL;
    PPDMNSBWGROUP pBwGroupNew = NULL;

    int rc = RTCritSectEnter(&pShaper->cs);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        if (pcszBwGroup)
        {
            pBwGroupNew = pdmNsBwGroupFindById(pShaper, pcszBwGroup);
            if (pBwGroupNew)
                pdmNsBwGroupRef(pBwGroupNew);
            else
                rc = VERR_NOT_FOUND;
        }

        if (RT_SUCCESS(rc))
        {
            pBwGroupOld = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, pBwGroupNew, PPDMNSBWGROUP);
            ASMAtomicWritePtr(&pFilter->pBwGroupR0, MMHyperR3ToR0(pVM, pBwGroupNew));
            if (pBwGroupOld)
                pdmNsBwGroupUnref(pBwGroupOld);
            pdmNsFilterLink(pFilter);
        }

        int rc2 = RTCritSectLeave(&pShaper->cs);
        AssertRC(rc2);
    }

    return rc;
}
/**
 * Detach network filter driver from bandwidth group.
 *
 * @returns VBox status code.
 * @param   pUVM     The user mode VM handle.
 * @param   pDrvIns  The driver instance.
 * @param   pFilter  Pointer to the filter we detach.
 */
VMMR3_INT_DECL(int) PDMR3NsDetach(PUVM pUVM, PPDMDRVINS pDrvIns, PPDMNSFILTER pFilter)
{
    RT_NOREF_PV(pDrvIns);
    VM_ASSERT_EMT(pUVM->pVM);
    AssertPtrReturn(pFilter, VERR_INVALID_POINTER);

    /* Now, return quietly if the filter isn't attached since driver/device
       destructors are called on constructor failure. */
    if (!pFilter->pBwGroupR3)
        return VINF_SUCCESS;
    AssertPtrReturn(pFilter->pBwGroupR3, VERR_INVALID_POINTER);

    PPDMNETSHAPER pShaper = pUVM->pdm.s.pNetShaper;
    LOCK_NETSHAPER_RETURN(pShaper);

    pdmNsFilterUnlink(pFilter);
    PPDMNSBWGROUP pBwGroup = ASMAtomicXchgPtrT(&pFilter->pBwGroupR3, NULL, PPDMNSBWGROUP);
    if (pBwGroup)
        pdmNsBwGroupUnref(pBwGroup);

    UNLOCK_NETSHAPER(pShaper);
    return VINF_SUCCESS;
}
void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc)
{
    PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser;

    LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc));

    if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH)
        pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true);
    else
    {
        Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0);
        uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg);

        /* The first error will be returned. */
        if (RT_FAILURE(rc))
            ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
#ifdef VBOX_WITH_DEBUGGER
        else
        {
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;

            /* Overwrite with injected error code. */
            if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ)
                rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS);
            else
                rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS);

            if (RT_FAILURE(rc))
                ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS);
        }
#endif

        if (   !(uOld - pTask->DataSeg.cbSeg)
            && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true))
        {
#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
            PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint;

            /* Check if we should delay completion of the request. */
            if (   ASMAtomicReadU32(&pEpFile->msDelay) > 0
                && ASMAtomicCmpXchgPtr(&pEpFile->pReqDelayed, pTaskFile, NULL))
            {
                /* Arm the delay. */
                pEpFile->tsDelayEnd = RTTimeProgramMilliTS() + pEpFile->msDelay;
                LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, pEpFile->msDelay));
                return;
            }
#endif
            pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);

#ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY
            /* Check for an expired delay. */
            if (   pEpFile->pReqDelayed != NULL
                && RTTimeProgramMilliTS() >= pEpFile->tsDelayEnd)
            {
                pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pReqDelayed, NULL, PPDMASYNCCOMPLETIONTASKFILE);
                ASMAtomicXchgU32(&pEpFile->msDelay, 0);
                LogRel(("AIOMgr: Delayed request %#p completed\n", pTaskFile));
                pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true);
            }
#endif
        }
    }
}
/**
 * Sets the default release logger instance.
 *
 * @returns The old default release logger instance.
 * @param   pLogger     The new default release logger instance.
 */
RTDECL(PRTLOGGER) RTLogRelSetDefaultInstance(PRTLOGGER pLogger)
{
    return ASMAtomicXchgPtrT(&g_pRelLogger, pLogger, PRTLOGGER);
}
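Because the setter atomically returns the previous instance, a caller can swap a logger in and restore the old one afterwards. A minimal usage sketch; pMyLogger stands for a logger the caller has already created (e.g. via RTLogCreate):

    PRTLOGGER pOldLogger = RTLogRelSetDefaultInstance(pMyLogger);
    /* ... code that emits release-log output through the swapped-in logger ... */
    RTLogRelSetDefaultInstance(pOldLogger); /* restore the previous instance */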
RTDECL(int) RTReqQueueProcess(RTREQQUEUE hQueue, RTMSINTERVAL cMillies)
{
    LogFlow(("RTReqQueueProcess %x\n", hQueue));

    /*
     * Check input.
     */
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Process loop.  Stop (break) after the first non-VINF_SUCCESS status code.
     */
    int rc = VINF_SUCCESS;
    for (;;)
    {
        /*
         * Get pending requests.
         */
        PRTREQ pReqs = ASMAtomicXchgPtrT(&pQueue->pAlreadyPendingReqs, NULL, PRTREQ);
        if (RT_LIKELY(!pReqs))
        {
            pReqs = ASMAtomicXchgPtrT(&pQueue->pReqs, NULL, PRTREQ);
            if (!pReqs)
            {
                /* We do not adjust cMillies (documented behavior). */
                ASMAtomicWriteBool(&pQueue->fBusy, false); /* this aint 100% perfect, but it's good enough for now... */
                rc = RTSemEventWait(pQueue->EventSem, cMillies);
                if (rc != VINF_SUCCESS)
                    break;
                continue;
            }

            ASMAtomicWriteBool(&pQueue->fBusy, true);

            /*
             * Reverse the list to process it in FIFO order.
             */
            PRTREQ pReq = pReqs;
            if (pReq->pNext)
                Log2(("RTReqQueueProcess: 2+ requests: %p %p %p\n", pReq, pReq->pNext, pReq->pNext->pNext));
            pReqs = NULL;
            while (pReq)
            {
                Assert(pReq->enmState == RTREQSTATE_QUEUED);
                Assert(pReq->uOwner.hQueue == pQueue);
                PRTREQ pCur = pReq;
                pReq = pReq->pNext;
                pCur->pNext = pReqs;
                pReqs = pCur;
            }
        }
        else
            ASMAtomicWriteBool(&pQueue->fBusy, true);

        /*
         * Process the requests.
         */
        while (pReqs)
        {
            /* Unchain the first request and advance the list. */
            PRTREQ pReq = pReqs;
            pReqs = pReqs->pNext;
            pReq->pNext = NULL;

            /* Process the request. */
            rc = rtReqProcessOne(pReq);
            AssertRC(rc);
            if (rc != VINF_SUCCESS)
            {
                /* Propagate the return code to caller.  If more requests
                   pending, queue them for later. */
                if (pReqs)
                {
                    pReqs = ASMAtomicXchgPtrT(&pQueue->pAlreadyPendingReqs, pReqs, PRTREQ);
                    Assert(!pReqs);
                }
                break;
            }
        }

        if (rc != VINF_SUCCESS)
            break;
    }

    LogFlow(("RTReqQueueProcess: returns %Rrc\n", rc));
    return rc;
}
RTDECL(int) RTReqQueueProcess(RTREQQUEUE hQueue, RTMSINTERVAL cMillies)
{
    LogFlow(("RTReqQueueProcess %x\n", hQueue));

    /*
     * Check input.
     */
    PRTREQQUEUEINT pQueue = hQueue;
    AssertPtrReturn(pQueue, VERR_INVALID_HANDLE);
    AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE);

    /*
     * Process loop.
     *
     * We do not repeat the outer loop if we've got an informational status code
     * since that code needs processing by our caller.
     */
    int rc = VINF_SUCCESS;
    while (rc <= VINF_SUCCESS)
    {
        /*
         * Get pending requests.
         */
        PRTREQ pReqs = ASMAtomicXchgPtrT(&pQueue->pReqs, NULL, PRTREQ);
        if (!pReqs)
        {
            ASMAtomicWriteBool(&pQueue->fBusy, false); /* this aint 100% perfect, but it's good enough for now... */
            /** @todo We currently don't care if the entire time wasted here is larger than
             *        cMillies */
            rc = RTSemEventWait(pQueue->EventSem, cMillies);
            if (rc != VINF_SUCCESS)
                break;
            continue;
        }
        ASMAtomicWriteBool(&pQueue->fBusy, true);

        /*
         * Reverse the list to process it in FIFO order.
         */
        PRTREQ pReq = pReqs;
        if (pReq->pNext)
            Log2(("RTReqQueueProcess: 2+ requests: %p %p %p\n", pReq, pReq->pNext, pReq->pNext->pNext));
        pReqs = NULL;
        while (pReq)
        {
            Assert(pReq->enmState == RTREQSTATE_QUEUED);
            Assert(pReq->uOwner.hQueue == pQueue);
            PRTREQ pCur = pReq;
            pReq = pReq->pNext;
            pCur->pNext = pReqs;
            pReqs = pCur;
        }

        /*
         * Process the requests.
         */
        while (pReqs)
        {
            /* Unchain the first request and advance the list. */
            pReq = pReqs;
            pReqs = pReqs->pNext;
            pReq->pNext = NULL;

            /* Process the request */
            rc = rtReqProcessOne(pReq);
            AssertRC(rc);
            if (rc != VINF_SUCCESS)
                break; /** @todo r=bird: we're dropping requests here! Add 2nd queue that can hold them. (will fix when writing a testcase) */
        }
    }

    LogFlow(("RTReqQueueProcess: returns %Rrc\n", rc));
    return rc;
}
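Note how the r=bird todo in this older variant is exactly what the pAlreadyPendingReqs side queue in the first RTReqQueueProcess variant above addresses: requests left over after a non-VINF_SUCCESS status are parked there with another ASMAtomicXchgPtrT and picked up first on the next call, instead of being dropped.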
/**
 * Process pending items in one queue.
 *
 * @returns Success indicator.
 *          If false, the consumer said "enough!" to some item.
 * @param   pQueue  The queue.
 */
static bool pdmR3QueueFlush(PPDMQUEUE pQueue)
{
    STAM_PROFILE_START(&pQueue->StatFlushPrf, p);

    /*
     * Get the lists.
     */
    PPDMQUEUEITEMCORE pItems   = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE);
    RTRCPTR           pItemsRC = ASMAtomicXchgRCPtr(&pQueue->pPendingRC, NIL_RTRCPTR);
    RTR0PTR           pItemsR0 = ASMAtomicXchgR0Ptr(&pQueue->pPendingR0, NIL_RTR0PTR);

    AssertMsgReturn(   pItemsR0
                    || pItemsRC
                    || pItems,
                    ("Someone is racing us? This shouldn't happen!\n"),
                    true);

    /*
     * Reverse the list (it's inserted in LIFO order to avoid semaphores, remember).
     */
    PPDMQUEUEITEMCORE pCur = pItems;
    pItems = NULL;
    while (pCur)
    {
        PPDMQUEUEITEMCORE pInsert = pCur;
        pCur = pCur->pNextR3;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Do the same for any pending RC items.
     */
    while (pItemsRC)
    {
        PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperRCToR3(pQueue->pVMR3, pItemsRC);
        pItemsRC = pInsert->pNextRC;
        pInsert->pNextRC = NIL_RTRCPTR;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Do the same for any pending R0 items.
     */
    while (pItemsR0)
    {
        PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperR0ToR3(pQueue->pVMR3, pItemsR0);
        pItemsR0 = pInsert->pNextR0;
        pInsert->pNextR0 = NIL_RTR0PTR;
        pInsert->pNextR3 = pItems;
        pItems = pInsert;
    }

    /*
     * Feed the items to the consumer function.
     */
    Log2(("pdmR3QueueFlush: pQueue=%p enmType=%d pItems=%p\n", pQueue, pQueue->enmType, pItems));
    switch (pQueue->enmType)
    {
        case PDMQUEUETYPE_DEV:
            while (pItems)
            {
                if (!pQueue->u.Dev.pfnCallback(pQueue->u.Dev.pDevIns, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        case PDMQUEUETYPE_DRV:
            while (pItems)
            {
                if (!pQueue->u.Drv.pfnCallback(pQueue->u.Drv.pDrvIns, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        case PDMQUEUETYPE_INTERNAL:
            while (pItems)
            {
                if (!pQueue->u.Int.pfnCallback(pQueue->pVMR3, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        case PDMQUEUETYPE_EXTERNAL:
            while (pItems)
            {
                if (!pQueue->u.Ext.pfnCallback(pQueue->u.Ext.pvUser, pItems))
                    break;
                pCur = pItems;
                pItems = pItems->pNextR3;
                pdmR3QueueFreeItem(pQueue, pCur);
            }
            break;

        default:
            AssertMsgFailed(("Invalid queue type %d\n", pQueue->enmType));
            break;
    }

    /*
     * Success?
     */
    if (pItems)
    {
        /*
         * Reverse the list.
         */
        pCur = pItems;
        pItems = NULL;
        while (pCur)
        {
            PPDMQUEUEITEMCORE pInsert = pCur;
            pCur = pInsert->pNextR3;
            pInsert->pNextR3 = pItems;
            pItems = pInsert;
        }

        /*
         * Insert the list at the tail of the pending list.
         */
        for (;;)
        {
            if (ASMAtomicCmpXchgPtr(&pQueue->pPendingR3, pItems, NULL))
                break;
            PPDMQUEUEITEMCORE pPending = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE);
            if (pPending)
            {
                pCur = pPending;
                while (pCur->pNextR3)
                    pCur = pCur->pNextR3;
                pCur->pNextR3 = pItems;
                pItems = pPending;
            }
        }

        STAM_REL_COUNTER_INC(&pQueue->StatFlushLeftovers);
        STAM_PROFILE_STOP(&pQueue->StatFlushPrf, p);
        return false;
    }

    STAM_PROFILE_STOP(&pQueue->StatFlushPrf, p);
    return true;
}
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertReturn(cReqs != 0, VERR_INVALID_POINTER);
    AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);

                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value.
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }

                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }

                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
/**
 * Internal worker processing events and inserting new requests into the waiting list.
 */
static int rtFileAioCtxProcessEvents(PRTFILEAIOCTXINTERNAL pCtxInt)
{
    int rc = VINF_SUCCESS;

    /* Process new requests first. */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, false);
    if (fWokenUp)
    {
        for (unsigned iSlot = 0; iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead); iSlot++)
        {
            PRTFILEAIOREQINTERNAL pReqHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[iSlot], NULL, PRTFILEAIOREQINTERNAL);

            while (   (pCtxInt->iFirstFree < pCtxInt->cReqsWaitMax)
                   && pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                pCtxInt->apReqs[pCtxInt->iFirstFree] = pReqHead;
                pReqHead->iWaitingList = pCtxInt->iFirstFree;
                pReqHead = pReqHead->pNext;

                /* Clear pointer to next and previous element just for safety. */
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pNext = NULL;
                pCtxInt->apReqs[pCtxInt->iFirstFree]->pPrev = NULL;
                pCtxInt->iFirstFree++;

                Assert(   (pCtxInt->iFirstFree <= pCtxInt->cMaxRequests)
                       && (pCtxInt->iFirstFree <= pCtxInt->cReqsWaitMax));
            }

            /* Append the rest to the wait list. */
            if (pReqHead)
            {
                RTFIELAIOREQ_ASSERT_STATE(pReqHead, SUBMITTED);
                if (!pCtxInt->pReqsWaitHead)
                {
                    Assert(!pCtxInt->pReqsWaitTail);
                    pCtxInt->pReqsWaitHead = pReqHead;
                    pReqHead->pPrev = NULL;
                }
                else
                {
                    AssertPtr(pCtxInt->pReqsWaitTail);

                    pCtxInt->pReqsWaitTail->pNext = pReqHead;
                    pReqHead->pPrev = pCtxInt->pReqsWaitTail;
                }

                /* Update tail. */
                while (pReqHead->pNext)
                {
                    RTFIELAIOREQ_ASSERT_STATE(pReqHead->pNext, SUBMITTED);
                    pReqHead = pReqHead->pNext;
                }

                pCtxInt->pReqsWaitTail = pReqHead;
                pCtxInt->pReqsWaitTail->pNext = NULL;
            }
        }

        /* Check if a request needs to be canceled. */
        PRTFILEAIOREQINTERNAL pReqToCancel = ASMAtomicReadPtrT(&pCtxInt->pReqToCancel, PRTFILEAIOREQINTERNAL);
        if (pReqToCancel)
        {
            /* The request can be in the array waiting for completion or still in the list because it is full. */
            if (pReqToCancel->iWaitingList != RTFILEAIOCTX_WAIT_ENTRY_INVALID)
            {
                /* Take it out of the waiting array. */
                pCtxInt->apReqs[pReqToCancel->iWaitingList] = pCtxInt->apReqs[--pCtxInt->iFirstFree];
                pCtxInt->apReqs[pReqToCancel->iWaitingList]->iWaitingList = pReqToCancel->iWaitingList;
            }
            else
            {
                /* Unlink from the waiting list. */
                PRTFILEAIOREQINTERNAL pPrev = pReqToCancel->pPrev;
                PRTFILEAIOREQINTERNAL pNext = pReqToCancel->pNext;

                if (pNext)
                    pNext->pPrev = pPrev;
                else
                {
                    /* We canceled the tail. */
                    pCtxInt->pReqsWaitTail = pPrev;
                }

                if (pPrev)
                    pPrev->pNext = pNext;
                else
                {
                    /* We canceled the head. */
                    pCtxInt->pReqsWaitHead = pNext;
                }
            }

            ASMAtomicDecS32(&pCtxInt->cRequests);
            AssertMsg(pCtxInt->cRequests >= 0, ("Canceled a request which is not part of this context\n"));
            RTSemEventSignal(pCtxInt->SemEventCancel);
        }
    }
    else
    {
        if (ASMAtomicXchgBool(&pCtxInt->fWokenUp, false))
            rc = VERR_INTERRUPTED;
    }

    return rc;
}
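The cancel handshake above has a requester side that is not shown: something must publish pReqToCancel, wake the processing thread, and block on SemEventCancel until the request has been unlinked. A minimal sketch under those assumptions; the helper name is illustrative and only loosely mirrors the real cancel path, while rtFileAioCtxWakeup and the fWokenUpInternal flag are the same ones RTFileAioCtxSubmit above uses:

static int rtFileAioCtxCancelReqSketch(PRTFILEAIOCTXINTERNAL pCtxInt, PRTFILEAIOREQINTERNAL pReqInt)
{
    /* Publish the request to cancel, then wake the processing thread the
       same way the submitter does. */
    ASMAtomicWritePtr(&pCtxInt->pReqToCancel, pReqInt);
    if (!ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true))
        rtFileAioCtxWakeup(pCtxInt);

    /* Wait until rtFileAioCtxProcessEvents has unlinked the request, then
       clear the published pointer again. */
    int rc = RTSemEventWait(pCtxInt->SemEventCancel, RT_INDEFINITE_WAIT);
    ASMAtomicWriteNullPtr(&pCtxInt->pReqToCancel);
    return rc;
}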