/**
 * Termination request.
 *
 * @return  true if we're ok with shutting down now, false if we're not.
 * @param   fOptions        Flags.
 */
bool org_virtualbox_SupDrv::terminate(IOOptionBits fOptions)
{
    bool fRc;
    LogFlow(("org_virtualbox_SupDrv::terminate: reference_count=%d g_cSessions=%d (fOptions=%#x)\n",
             KMOD_INFO_NAME.reference_count, ASMAtomicUoReadS32(&g_cSessions), fOptions));
    if (    KMOD_INFO_NAME.reference_count != 0
        ||  ASMAtomicUoReadS32(&g_cSessions))
        fRc = false;
    else
        fRc = IOService::terminate(fOptions);
    LogFlow(("org_virtualbox_SupDrv::terminate: returns %d\n", fRc));
    return fRc;
}
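/*
 * A hedged sketch of the other side of the g_cSessions gate (the helper names
 * are illustrative, not from the original source): each client open bumps the
 * counter that makes terminate() refuse to shut down, and each close drops it.
 */
static void supdrvDarwinSessionOpened(void)
{
    ASMAtomicIncS32(&g_cSessions);  /* terminate() now returns false */
}

static void supdrvDarwinSessionClosed(void)
{
    ASMAtomicDecS32(&g_cSessions);  /* last close re-enables unloading */
}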
/**
 * Yield the critical section if someone is waiting on it.
 *
 * When yielding, we'll leave the critical section and try to make sure the
 * other waiting threads get a chance of entering before we reclaim it.
 *
 * @retval  true if yielded.
 * @retval  false if not yielded.
 * @param   pCritSect           The critical section.
 */
VMMR3DECL(bool) PDMR3CritSectYield(PPDMCRITSECT pCritSect)
{
    AssertPtrReturn(pCritSect, false);
    AssertReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, false);
    Assert(pCritSect->s.Core.NativeThreadOwner == RTThreadNativeSelf());
    Assert(!(pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP));

    /* No recursion allowed here. */
    int32_t const cNestings = pCritSect->s.Core.cNestings;
    AssertReturn(cNestings == 1, false);

    int32_t const cLockers = ASMAtomicReadS32(&pCritSect->s.Core.cLockers);
    if (cLockers < cNestings)
        return false;

#ifdef PDMCRITSECT_STRICT
    RTLOCKVALSRCPOS const SrcPos = pCritSect->s.Core.pValidatorRec->SrcPos;
#endif
    PDMCritSectLeave(pCritSect);

    /*
     * If we're lucky, then one of the waiters has entered the lock already.
     * We spin a little bit in hope for this to happen so we can avoid the
     * yield detour.
     */
    if (ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0)
    {
        int cLoops = 20;
        while (   cLoops > 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cNestings) == 0
               && ASMAtomicUoReadS32(&pCritSect->s.Core.cLockers)  >= 0)
        {
            ASMNopPause();
            cLoops--;
        }
        if (cLoops == 0)
            RTThreadYield();
    }

#ifdef PDMCRITSECT_STRICT
    int rc = PDMCritSectEnterDebug(pCritSect, VERR_IGNORED,
                                   SrcPos.uId, SrcPos.pszFile, SrcPos.uLine, SrcPos.pszFunction);
#else
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
#endif
    AssertLogRelRC(rc);
    return true;
}
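/*
 * A minimal usage sketch (not from the original source): a long-running loop
 * that periodically offers the lock to waiters.  vmmDevProcessQueue and its
 * work-item step are hypothetical; the point is that when PDMR3CritSectYield
 * returns true the caller holds the section again and may simply continue.
 */
static void vmmDevProcessQueue(PPDMCRITSECT pCritSect, uint32_t cItems)
{
    int rc = PDMCritSectEnter(pCritSect, VERR_IGNORED);
    AssertLogRelRC(rc);
    while (cItems-- > 0)
    {
        /* ... process one work item while owning the section ... */
        PDMR3CritSectYield(pCritSect); /* re-acquires before returning if it yielded */
    }
    PDMCritSectLeave(pCritSect);
}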
/**
 * Locks the stuff for reading.
 *
 * This is just cheap stuff to make sure the caller is doing the right thing.
 */
DECLINLINE(void) rtstrFormatTypeReadLock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    if (RT_UNLIKELY(ASMAtomicIncS32(&g_i32Spinlock) < 0))
    {
        unsigned volatile i;

        AssertFailed();
        for (i = 0;; i++)
            if (ASMAtomicUoReadS32(&g_i32Spinlock) > 0)
                break;
    }
#endif
}
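/*
 * A hedged sketch of the presumed matching read-unlock (assumption, not from
 * the lines above): readers increment g_i32Spinlock and decrement on release,
 * while a writer would park the counter at a large negative value so new
 * readers spin in the loop above until the update is done.
 */
DECLINLINE(void) rtstrFormatTypeReadUnlock(void)
{
#if defined(RTSTRFORMATTYPE_WITH_LOCKING)
    ASMAtomicDecS32(&g_i32Spinlock); /* drop our read reference */
#endif
}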
/**
 * Checks whether the VBoxNetAdp wossname can be unloaded.
 *
 * This will return false if someone is currently using the module.
 *
 * @returns true if it's relatively safe to unload it, otherwise false.
 * @param   pGlobals        Pointer to the globals.
 */
DECLHIDDEN(bool) vboxNetAdpCanUnload(PVBOXNETADPGLOBALS pGlobals)
{
    bool fRc = true; /* Assume it can be unloaded. */
    unsigned i;

    for (i = 0; i < RT_ELEMENTS(pGlobals->aAdapters); i++)
    {
        PVBOXNETADP pThis = &pGlobals->aAdapters[i];
        if (vboxNetAdpGetStateWithLock(pThis) >= kVBoxNetAdpState_Connected)
        {
            fRc = false;
            break; /* We already know the answer. */
        }
    }
    return fRc && ASMAtomicUoReadS32((int32_t volatile *)&pGlobals->cFactoryRefs) <= 0;
}
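/*
 * A hedged usage sketch: a module-unload callback would typically gate the
 * actual teardown on this check.  vboxNetAdpModuleUnload and the elided
 * teardown steps are illustrative, not from the original source.
 */
static int vboxNetAdpModuleUnload(PVBOXNETADPGLOBALS pGlobals)
{
    if (!vboxNetAdpCanUnload(pGlobals))
        return VERR_WRONG_ORDER; /* adapters still connected or factory still referenced */
    /* ... deregister the factory and free the adapter table ... */
    return VINF_SUCCESS;
}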
DECLINLINE(int) rtSemEventLnxMultiWait(struct RTSEMEVENTMULTIINTERNAL *pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Quickly check whether it's signaled.
     */
    int32_t iCur = ASMAtomicUoReadS32(&pThis->iState);
    Assert(iCur == 0 || iCur == -1 || iCur == 1);
    if (iCur == -1)
        return VINF_SUCCESS;

    /*
     * Check and convert the timeout value.
     */
    struct timespec ts;
    struct timespec *pTimeout = NULL;
    uint64_t u64Deadline = 0; /* shut up gcc */
    if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
    {
        /* If the timeout is zero, then we're done. */
        if (!uTimeout)
            return VERR_TIMEOUT;

        /* Convert it to a deadline + interval timespec. */
        if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
            uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                     ? uTimeout * UINT32_C(1000000)
                     : UINT64_MAX;
        if (uTimeout != UINT64_MAX) /* unofficial way of indicating an indefinite wait */
        {
            if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                u64Deadline = RTTimeSystemNanoTS() + uTimeout;
            else
            {
                uint64_t u64Now = RTTimeSystemNanoTS();
                if (uTimeout <= u64Now)
                    return VERR_TIMEOUT;
                u64Deadline = uTimeout;
                uTimeout   -= u64Now;
            }
            if (   sizeof(ts.tv_sec) >= sizeof(uint64_t)
                || uTimeout <= UINT64_C(1000000000) * UINT32_MAX)
            {
                ts.tv_nsec = uTimeout % UINT32_C(1000000000);
                ts.tv_sec  = uTimeout / UINT32_C(1000000000);
                pTimeout   = &ts;
            }
        }
    }

    /*
     * The wait loop.
     */
#ifdef RTSEMEVENTMULTI_STRICT
    RTTHREAD hThreadSelf = RTThreadSelfAutoAdopt();
#else
    RTTHREAD hThreadSelf = RTThreadSelf();
#endif
    for (unsigned i = 0;; i++)
    {
        /*
         * Start waiting. We only account for there being or having been
         * threads waiting on the semaphore to keep things simple.
         */
        iCur = ASMAtomicUoReadS32(&pThis->iState);
        Assert(iCur == 0 || iCur == -1 || iCur == 1);
        if (    iCur == 1
            ||  ASMAtomicCmpXchgS32(&pThis->iState, 1, 0))
        {
            /* adjust the relative timeout */
            if (pTimeout)
            {
                int64_t i64Diff = u64Deadline - RTTimeSystemNanoTS();
                if (i64Diff < 1000)
                    return VERR_TIMEOUT;
                ts.tv_sec  = (uint64_t)i64Diff / UINT32_C(1000000000);
                ts.tv_nsec = (uint64_t)i64Diff % UINT32_C(1000000000);
            }
#ifdef RTSEMEVENTMULTI_STRICT
            if (pThis->fEverHadSignallers)
            {
                int rc9 = RTLockValidatorRecSharedCheckBlocking(&pThis->Signallers, hThreadSelf, pSrcPos, false,
                                                                uTimeout / UINT32_C(1000000), RTTHREADSTATE_EVENT_MULTI, true);
                if (RT_FAILURE(rc9))
                    return rc9;
            }
#endif
            RTThreadBlocking(hThreadSelf, RTTHREADSTATE_EVENT_MULTI, true);
            long rc = sys_futex(&pThis->iState, FUTEX_WAIT, 1, pTimeout, NULL, 0);
            RTThreadUnblocked(hThreadSelf, RTTHREADSTATE_EVENT_MULTI);
            if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                return VERR_SEM_DESTROYED;
            if (rc == 0)
                return VINF_SUCCESS;

            /*
             * Act on the wakeup code.
             */
            if (rc == -ETIMEDOUT)
            {
                /** @todo something is broken here. shows up every now and again in the ata
                 *        code. Should try to run the timeout against RTTimeMilliTS to
                 *        check that it's doing the right thing... */
                Assert(pTimeout);
                return VERR_TIMEOUT;
            }
            if (rc == -EWOULDBLOCK)
                /* retry, the value changed. */;
            else if (rc == -EINTR)
            {
                if (fFlags & RTSEMWAIT_FLAGS_NORESUME)
                    return VERR_INTERRUPTED;
            }
            else
            {
                /* this shouldn't happen! */
                AssertMsgFailed(("rc=%ld errno=%d\n", rc, errno));
                return RTErrConvertFromErrno(-rc); /* rc is a negative errno value here */
            }
        }
        else if (iCur == -1)
            return VINF_SUCCESS;
    }
}
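/*
 * A minimal usage sketch of the public API this worker backs (assuming the
 * standard IPRT RTSemEventMulti interface): waiters block until the event is
 * signalled, and all of them are released until it is reset again.  The
 * hEvtInitDone handle and the 30s budget are illustrative.
 */
static int waitForInitDone(RTSEMEVENTMULTI hEvtInitDone)
{
    int rc = RTSemEventMultiWait(hEvtInitDone, 30000 /* ms */);
    if (rc == VERR_TIMEOUT)
        LogRel(("init did not complete within 30s\n"));
    return rc;
}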
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    /*
     * Can't wait if there are no requests around.
     */
    if (   RT_UNLIKELY(ASMAtomicUoReadS32(&pCtxInt->cRequests) == 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /*
     * Loop until we're woken up, hit an error (incl. timeout), or
     * have collected the desired number of requests.
     */
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    while (   !pCtxInt->fWokenUp
           && cMinReqs > 0)
    {
        uint64_t     StartNanoTS = 0;
        DWORD        dwTimeout = cMillies == RT_INDEFINITE_WAIT ? INFINITE : cMillies;
        DWORD        cbTransfered;
        LPOVERLAPPED pOverlapped;
        ULONG_PTR    lCompletionKey;
        BOOL         fSucceeded;

        if (cMillies != RT_INDEFINITE_WAIT)
            StartNanoTS = RTTimeNanoTS();

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        fSucceeded = GetQueuedCompletionStatus(pCtxInt->hIoCompletionPort,
                                               &cbTransfered,
                                               &lCompletionKey,
                                               &pOverlapped,
                                               dwTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (   !fSucceeded
            && !pOverlapped)
        {
            /* The call failed to dequeue a completion packet; this includes the timeout case. */
            rc = RTErrConvertFromWin32(GetLastError());
            break;
        }

        /* Check if we got woken up. */
        if (lCompletionKey == AIO_CONTEXT_WAKEUP_EVENT)
        {
            Assert(fSucceeded && !pOverlapped);
            break;
        }

        /* A request completed. */
        PRTFILEAIOREQINTERNAL pReqInt = OVERLAPPED_2_RTFILEAIOREQINTERNAL(pOverlapped);
        AssertPtr(pReqInt);
        Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

        /* Mark the request as finished. */
        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

        pReqInt->cbTransfered = cbTransfered;
        if (fSucceeded)
            pReqInt->Rc = VINF_SUCCESS;
        else
        {
            DWORD errCode = GetLastError();
            pReqInt->Rc = RTErrConvertFromWin32(errCode);
            if (pReqInt->Rc == VERR_UNRESOLVED_ERROR)
                LogRel(("AIO/win: Request %#p returned rc=%Rrc (native %u)\n", pReqInt, pReqInt->Rc, errCode));
        }

        pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;

        /* Update counter. */
        cMinReqs--;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* Recalculate timeout. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed < cMillies)
                cMillies -= cMilliesElapsed;
            else
                cMillies = 0;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);

    /*
     * Clear the wakeup flag and set rc.
     */
    bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
    if (   fWokenUp
        && RT_SUCCESS(rc))
        rc = VERR_INTERRUPTED;

    return rc;
}
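/*
 * A hedged usage sketch (standard RTFileAio API assumed): drain up to 16
 * completed requests with a 100ms budget.  The per-request handling is a
 * placeholder; RTFileAioReqGetRC is the regular per-request status accessor.
 */
static int drainCompletions(RTFILEAIOCTX hAioCtx)
{
    RTFILEAIOREQ ahReqs[16];
    uint32_t     cCompleted = 0;
    int rc = RTFileAioCtxWait(hAioCtx, 1 /* cMinReqs */, 100 /* cMillies */,
                              ahReqs, RT_ELEMENTS(ahReqs), &cCompleted);
    if (RT_SUCCESS(rc))
        for (uint32_t i = 0; i < cCompleted; i++)
        {
            size_t cbTransfered = 0;
            int rcReq = RTFileAioReqGetRC(ahReqs[i], &cbTransfered);
            NOREF(rcReq); /* ... hand the result off to the consumer here ... */
        }
    return rc;
}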
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs)
{
    int rc = VINF_SUCCESS;
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;

    /* Parameter checks */
    AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);

    rtFileAioCtxDump(pCtxInt);

    /* Check that we don't exceed the limit */
    if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests)
        return VERR_FILE_AIO_LIMIT_EXCEEDED;

    PRTFILEAIOREQINTERNAL pHead = NULL;

    do
    {
        int rcPosix = 0;
        size_t cReqsSubmit = 0;
        size_t i = 0;
        PRTFILEAIOREQINTERNAL pReqInt;

        while (   (i < cReqs)
               && (i < AIO_LISTIO_MAX))
        {
            pReqInt = pahReqs[i];
            if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt))
            {
                /* Undo everything and stop submitting. */
                for (size_t iUndo = 0; iUndo < i; iUndo++)
                {
                    pReqInt = pahReqs[iUndo];
                    RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    pReqInt->pCtxInt = NULL;

                    /* Unlink from the list again. */
                    PRTFILEAIOREQINTERNAL pNext, pPrev;
                    pNext = pReqInt->pNext;
                    pPrev = pReqInt->pPrev;
                    if (pNext)
                        pNext->pPrev = pPrev;
                    if (pPrev)
                        pPrev->pNext = pNext;
                    else
                        pHead = pNext;
                }
                rc = VERR_INVALID_HANDLE;
                break;
            }

            pReqInt->pCtxInt = pCtxInt;

            if (pReqInt->fFlush)
                break;

            /* Link them together. */
            pReqInt->pNext = pHead;
            if (pHead)
                pHead->pPrev = pReqInt;
            pReqInt->pPrev = NULL;
            pHead = pReqInt;
            RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

            cReqsSubmit++;
            i++;
        }

        if (cReqsSubmit)
        {
            rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL);
            if (RT_UNLIKELY(rcPosix < 0))
            {
                size_t cReqsSubmitted = cReqsSubmit;

                if (errno == EAGAIN)
                    rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                else
                    rc = RTErrConvertFromErrno(errno);

                /* Check which ones were not submitted. */
                for (i = 0; i < cReqsSubmit; i++)
                {
                    pReqInt = pahReqs[i];

                    rcPosix = aio_error(&pReqInt->AioCB);
                    if ((rcPosix != EINPROGRESS) && (rcPosix != 0))
                    {
                        cReqsSubmitted--;

#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                        if (errno == EINVAL)
#else
                        if (rcPosix == EINVAL)
#endif
                        {
                            /* Was not submitted. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                        }
                        else
                        {
                            /* An error occurred. */
                            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

                            /*
                             * Looks like Apple and glibc interpret the standard in different ways.
                             * glibc returns the error code which would be in errno but Apple returns
                             * -1 and sets errno to the appropriate value.
                             */
#if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD)
                            Assert(rcPosix == -1);
                            pReqInt->Rc = RTErrConvertFromErrno(errno);
#elif defined(RT_OS_LINUX)
                            pReqInt->Rc = RTErrConvertFromErrno(rcPosix);
#endif
                            pReqInt->cbTransfered = 0;
                        }

                        /* Unlink from the list. */
                        PRTFILEAIOREQINTERNAL pNext, pPrev;
                        pNext = pReqInt->pNext;
                        pPrev = pReqInt->pPrev;
                        if (pNext)
                            pNext->pPrev = pPrev;
                        if (pPrev)
                            pPrev->pNext = pNext;
                        else
                            pHead = pNext;

                        pReqInt->pNext = NULL;
                        pReqInt->pPrev = NULL;
                    }
                }
                ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                break;
            }

            ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit);
            AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
            cReqs   -= cReqsSubmit;
            pahReqs += cReqsSubmit;
        }

        /*
         * Check if we have a flush request now.
         * If not, we hit the AIO_LISTIO_MAX limit
         * and will continue submitting requests
         * above.
         */
        if (cReqs && RT_SUCCESS_NP(rc))
        {
            pReqInt = pahReqs[0];
            if (pReqInt->fFlush)
            {
                /*
                 * lio_listio does not work with flush requests so
                 * we have to use aio_fsync directly.
                 */
                rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB);
                if (RT_UNLIKELY(rcPosix < 0))
                {
                    if (errno == EAGAIN)
                    {
                        rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES;
                        RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED);
                    }
                    else
                    {
                        rc = RTErrConvertFromErrno(errno);
                        RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);
                        pReqInt->Rc = rc;
                    }
                    pReqInt->cbTransfered = 0;
                    break;
                }

                /* Link them together. */
                pReqInt->pNext = pHead;
                if (pHead)
                    pHead->pPrev = pReqInt;
                pReqInt->pPrev = NULL;
                pHead = pReqInt;
                RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED);

                ASMAtomicIncS32(&pCtxInt->cRequests);
                AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n"));
                cReqs--;
                pahReqs++;
            }
        }
    } while (   cReqs
             && RT_SUCCESS_NP(rc));

    if (pHead)
    {
        /*
         * Forward successfully submitted requests to the thread waiting for requests.
         * We search for a free slot first and if we don't find one
         * we will grab the first one and append our list to the existing entries.
         */
        unsigned iSlot = 0;
        while (   (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead))
               && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL))
            iSlot++;

        if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead))
        {
            /* Nothing found. */
            PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL);

            /* Find the end of the current head and link the old list to the current. */
            PRTFILEAIOREQINTERNAL pTail = pHead;
            while (pTail->pNext)
                pTail = pTail->pNext;

            pTail->pNext = pOldHead;

            ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead);
        }

        /* Set the internal wakeup flag and wakeup the thread if possible. */
        bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true);
        if (!fWokenUp)
            rtFileAioCtxWakeup(pCtxInt);
    }

    rtFileAioCtxDump(pCtxInt);

    return rc;
}
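/*
 * A hedged usage sketch (standard RTFileAio request API assumed): prepare a
 * single read and submit it through the routine above.  hFile, off, pvBuf
 * and cbRead are caller-supplied; request cleanup on failure is elided.
 */
static int submitOneRead(RTFILEAIOCTX hAioCtx, RTFILE hFile, RTFOFF off, void *pvBuf, size_t cbRead)
{
    RTFILEAIOREQ hReq;
    int rc = RTFileAioReqCreate(&hReq);
    if (RT_SUCCESS(rc))
    {
        rc = RTFileAioReqPrepareRead(hReq, hFile, off, pvBuf, cbRead, NULL /* pvUser */);
        if (RT_SUCCESS(rc))
            rc = RTFileAioCtxSubmit(hAioCtx, &hReq, 1);
    }
    return rc;
}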
RTDECL(int) RTFileAioCtxWait(RTFILEAIOCTX hAioCtx, size_t cMinReqs, RTMSINTERVAL cMillies,
                             PRTFILEAIOREQ pahReqs, size_t cReqs, uint32_t *pcReqs)
{
    /*
     * Validate the parameters, making sure to always set pcReqs.
     */
    AssertPtrReturn(pcReqs, VERR_INVALID_POINTER);
    *pcReqs = 0; /* always set */
    PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx;
    RTFILEAIOCTX_VALID_RETURN(pCtxInt);
    AssertPtrReturn(pahReqs, VERR_INVALID_POINTER);
    AssertReturn(cReqs != 0, VERR_INVALID_PARAMETER);
    AssertReturn(cReqs >= cMinReqs, VERR_OUT_OF_RANGE);

    /*
     * Can't wait if there are no requests around.
     */
    if (   RT_UNLIKELY(ASMAtomicUoReadS32(&pCtxInt->cRequests) == 0)
        && !(pCtxInt->fFlags & RTFILEAIOCTX_FLAGS_WAIT_WITHOUT_PENDING_REQUESTS))
        return VERR_FILE_AIO_NO_REQUEST;

    /*
     * Convert the timeout if specified.
     */
    struct timespec *pTimeout = NULL;
    struct timespec  Timeout = {0,0};
    uint64_t         StartNanoTS = 0;
    if (cMillies != RT_INDEFINITE_WAIT)
    {
        Timeout.tv_sec  = cMillies / 1000;
        Timeout.tv_nsec = cMillies % 1000 * 1000000;
        pTimeout = &Timeout;
        StartNanoTS = RTTimeNanoTS();
    }

    /* Wait for at least one. */
    if (!cMinReqs)
        cMinReqs = 1;

    /* For the wakeup call. */
    Assert(pCtxInt->hThreadWait == NIL_RTTHREAD);
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, RTThreadSelf());

    /*
     * Loop until we're woken up, hit an error (incl. timeout), or
     * have collected the desired number of requests.
     */
    int rc = VINF_SUCCESS;
    int cRequestsCompleted = 0;
    while (!pCtxInt->fWokenUp)
    {
        LNXKAIOIOEVENT aPortEvents[AIO_MAXIMUM_REQUESTS_PER_CONTEXT];
        int            cRequestsToWait = RT_MIN(cReqs, AIO_MAXIMUM_REQUESTS_PER_CONTEXT);

        ASMAtomicXchgBool(&pCtxInt->fWaiting, true);
        rc = rtFileAsyncIoLinuxGetEvents(pCtxInt->AioContext, cMinReqs, cRequestsToWait, &aPortEvents[0], pTimeout);
        ASMAtomicXchgBool(&pCtxInt->fWaiting, false);
        if (RT_FAILURE(rc))
            break;
        uint32_t const cDone = rc;
        rc = VINF_SUCCESS;

        /*
         * Process received events / requests.
         */
        for (uint32_t i = 0; i < cDone; i++)
        {
            /*
             * The iocb is the first element in our request structure.
             * So we can safely cast it directly to the handle (see above).
             */
            PRTFILEAIOREQINTERNAL pReqInt = (PRTFILEAIOREQINTERNAL)aPortEvents[i].pIoCB;
            AssertPtr(pReqInt);
            Assert(pReqInt->u32Magic == RTFILEAIOREQ_MAGIC);

            /** @todo aeichner: The rc field contains the result code
             *        like you can find in errno for the normal read/write ops.
             *        But there is a second field called rc2. I don't know the
             *        purpose for it yet. */
            if (RT_UNLIKELY(aPortEvents[i].rc < 0))
                pReqInt->Rc = RTErrConvertFromErrno(-aPortEvents[i].rc); /* Convert to positive value. */
            else
            {
                pReqInt->Rc = VINF_SUCCESS;
                pReqInt->cbTransfered = aPortEvents[i].rc;
            }

            /* Mark the request as finished. */
            RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED);

            pahReqs[cRequestsCompleted++] = (RTFILEAIOREQ)pReqInt;
        }

        /*
         * Done yet? If not, advance and try again.
         */
        if (cDone >= cMinReqs)
            break;
        cMinReqs -= cDone;
        cReqs    -= cDone;

        if (cMillies != RT_INDEFINITE_WAIT)
        {
            /* The API doesn't return ETIMEDOUT, so we have to fix that ourselves. */
            uint64_t NanoTS = RTTimeNanoTS();
            uint64_t cMilliesElapsed = (NanoTS - StartNanoTS) / 1000000;
            if (cMilliesElapsed >= cMillies)
            {
                rc = VERR_TIMEOUT;
                break;
            }

            /* The syscall supposedly updates it, but we're paranoid. :-) */
            Timeout.tv_sec  = (cMillies - (RTMSINTERVAL)cMilliesElapsed) / 1000;
            Timeout.tv_nsec = (cMillies - (RTMSINTERVAL)cMilliesElapsed) % 1000 * 1000000;
        }
    }

    /*
     * Update the context state and set the return value.
     */
    *pcReqs = cRequestsCompleted;
    ASMAtomicSubS32(&pCtxInt->cRequests, cRequestsCompleted);
    Assert(pCtxInt->hThreadWait == RTThreadSelf());
    ASMAtomicWriteHandle(&pCtxInt->hThreadWait, NIL_RTTHREAD);

    /*
     * Clear the wakeup flag and set rc.
     */
    if (   pCtxInt->fWokenUp
        && RT_SUCCESS(rc))
    {
        ASMAtomicXchgBool(&pCtxInt->fWokenUp, false);
        rc = VERR_INTERRUPTED;
    }

    return rc;
}
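/*
 * A hedged sketch of the cross-thread wakeup pattern this function supports
 * (using the standard RTFileAioCtxWakeup API): another thread can interrupt a
 * waiter blocked above, which then returns VERR_INTERRUPTED.  stopAioWorker
 * is an illustrative name.
 */
static void stopAioWorker(RTFILEAIOCTX hAioCtx)
{
    int rc = RTFileAioCtxWakeup(hAioCtx); /* waiter sees fWokenUp and bails out */
    AssertRC(rc);
}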