RTDECL(int) RTMpOnSpecific(RTCPUID idCpu, PFNRTMPWORKER pfnWorker, void *pvUser1, void *pvUser2)
{
    RTMPARGS Args;
    RT_ASSERT_INTS_ON();

    if (idCpu >= ncpus)
        return VERR_CPU_NOT_FOUND;

    if (RT_UNLIKELY(!RTMpIsCpuOnline(idCpu)))
        return RTMpIsCpuPresent(idCpu) ? VERR_CPU_OFFLINE : VERR_CPU_NOT_FOUND;

    Args.pfnWorker = pfnWorker;
    Args.pvUser1   = pvUser1;
    Args.pvUser2   = pvUser2;
    Args.idCpu     = idCpu;
    Args.cHits     = 0;

    RTTHREADPREEMPTSTATE PreemptState = RTTHREADPREEMPTSTATE_INITIALIZER;
    RTThreadPreemptDisable(&PreemptState);

    RTSOLCPUSET CpuSet;
    for (int i = 0; i < IPRT_SOL_SET_WORDS; i++)
        CpuSet.auCpus[i] = 0;
    BT_SET(CpuSet.auCpus, idCpu);

    rtMpSolCrossCall(&CpuSet, rtMpSolOnSpecificCpuWrapper, &Args);

    RTThreadPreemptRestore(&PreemptState);

    Assert(ASMAtomicUoReadU32(&Args.cHits) <= 1);
    return ASMAtomicUoReadU32(&Args.cHits) == 1 ? VINF_SUCCESS : VERR_CPU_NOT_FOUND;
}
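/*
 * Illustrative caller-side sketch for RTMpOnSpecific.  The worker and helper
 * names (mpExampleWorker, mpExampleRunOnCpu) are hypothetical; only the IPRT
 * calls used below (RTMpOnSpecific, RTMpCpuId, ASMAtomicIncU32) are real APIs.
 */
static DECLCALLBACK(void) mpExampleWorker(RTCPUID idCpu, void *pvUser1, void *pvUser2)
{
    uint32_t volatile *pcInvocations = (uint32_t volatile *)pvUser1;
    NOREF(pvUser2);
    Assert(idCpu == RTMpCpuId());   /* we should be running on the requested CPU */
    ASMAtomicIncU32(pcInvocations);
}

static int mpExampleRunOnCpu(RTCPUID idCpu)
{
    uint32_t cInvocations = 0;
    int rc = RTMpOnSpecific(idCpu, mpExampleWorker, (void *)&cInvocations, NULL /*pvUser2*/);
    if (RT_SUCCESS(rc))
        Assert(cInvocations == 1);  /* the worker ran exactly once */
    return rc;
}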
/**
 * Driver select hook.
 *
 * @param   cookie      The session.
 * @param   event       The event.
 * @param   ref         ???
 * @param   sync        ???
 *
 * @return Haiku status code.
 */
static status_t vgdrvHaikuSelect(void *cookie, uint8 event, uint32 ref, selectsync *sync)
{
    PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)cookie;
    status_t err = B_OK;

    switch (event)
    {
        case B_SELECT_READ:
            break;
        default:
            return EINVAL;
    }

    RTSpinlockAcquire(g_DevExt.SessionSpinlock);

    uint32_t u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
    if (pSession->u32MousePosChangedSeq != u32CurSeq)
    {
        pSession->u32MousePosChangedSeq = u32CurSeq;
        notify_select_event(sync, event);
    }
    else if (sState.selectSync == NULL)
    {
        sState.selectEvent = (uint8_t)event;
        sState.selectRef   = (uint32_t)ref;
        sState.selectSync  = (void *)sync;
    }
    else
        err = B_WOULD_BLOCK;

    RTSpinlockRelease(g_DevExt.SessionSpinlock);

    return err;
}
DECLINLINE(int) rtSemEventMultiPosixWait(RTSEMEVENTMULTI hEventMultiSem, uint32_t fFlags, uint64_t uTimeout,
                                         PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    struct RTSEMEVENTMULTIINTERNAL *pThis = hEventMultiSem;
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    uint32_t u32 = pThis->u32State;
    AssertReturn(u32 == EVENTMULTI_STATE_NOT_SIGNALED || u32 == EVENTMULTI_STATE_SIGNALED, VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);

    /*
     * Optimize the case where the event is signalled.
     */
    if (ASMAtomicUoReadU32(&pThis->u32State) == EVENTMULTI_STATE_SIGNALED)
    {
        int rc = rtSemEventMultiPosixWaitPoll(pThis);
        if (RT_LIKELY(rc != VERR_TIMEOUT))
            return rc;
    }

    /*
     * Indefinite or timed wait?
     */
    if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
        return rtSemEventMultiPosixWaitIndefinite(pThis, fFlags, pSrcPos);
    return rtSemEventMultiPosixWaitTimed(pThis, fFlags, uTimeout, pSrcPos);
}
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    RT_ASSERT_PREEMPT_CPUID_VAR();

    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiSolRetain(pThis);
    rtR0SemSolWaitEnterMutexWithUnpinningHack(&pThis->Mtx);
    Assert(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC);

    /*
     * Do the job.
     */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTISOL_GEN_SHIFT;
    fNew |= RTSEMEVENTMULTISOL_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    cv_broadcast(&pThis->Cnd);

    mutex_exit(&pThis->Mtx);

    rtR0SemEventMultiSolRelease(pThis);
    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
static int VBoxGuestSolarisPoll(dev_t Dev, short fEvents, int fAnyYet, short *pReqEvents, struct pollhead **ppPollHead)
{
    LogFlow((DEVICE_NAME "::Poll: fEvents=%d fAnyYet=%d\n", fEvents, fAnyYet));

    vboxguest_state_t *pState = ddi_get_soft_state(g_pVBoxGuestSolarisState, getminor(Dev));
    if (RT_LIKELY(pState))
    {
        PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)pState->pSession;
        uint32_t u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
        if (pSession->u32MousePosChangedSeq != u32CurSeq)
        {
            *pReqEvents |= (POLLIN | POLLRDNORM);
            pSession->u32MousePosChangedSeq = u32CurSeq;
        }
        else
        {
            *pReqEvents = 0;
            if (!fAnyYet)
                *ppPollHead = &g_PollHead;
        }

        return 0;
    }
    else
    {
        Log((DEVICE_NAME "::Poll: no state data for %d\n", getminor(Dev)));
        return EINVAL;
    }
}
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;

    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    RT_ASSERT_PREEMPT_CPUID_VAR();
    RT_ASSERT_INTS_ON();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Set the signal and increment the generation counter.
     */
    uint32_t fNew = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    fNew += 1 << RTSEMEVENTMULTIDARWIN_GEN_SHIFT;
    fNew |= RTSEMEVENTMULTIDARWIN_STATE_MASK;
    ASMAtomicWriteU32(&pThis->fStateAndGen, fNew);

    /*
     * Wake up all sleeping threads.
     */
    if (pThis->fHaveBlockedThreads)
    {
        ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, false);
        thread_wakeup_prim((event_t)pThis, FALSE /* all threads */, THREAD_AWAKENED);
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);

    RT_ASSERT_PREEMPT_CPUID();
    return VINF_SUCCESS;
}
static int vgdrvFreeBSDPoll(struct cdev *pDev, int fEvents, struct thread *td)
{
    int fEventsProcessed;

    LogFlow(("vgdrvFreeBSDPoll: fEvents=%d\n", fEvents));

    PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)pDev->si_drv1;
    if (RT_UNLIKELY(!VALID_PTR(pSession)))
    {
        Log(("vgdrvFreeBSDPoll: no state data for %s\n", devtoname(pDev)));
        return (fEvents & (POLLHUP | POLLIN | POLLRDNORM | POLLOUT | POLLWRNORM));
    }

    uint32_t u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
    if (pSession->u32MousePosChangedSeq != u32CurSeq)
    {
        fEventsProcessed = fEvents & (POLLIN | POLLRDNORM);
        pSession->u32MousePosChangedSeq = u32CurSeq;
    }
    else
    {
        fEventsProcessed = 0;
        selrecord(td, &g_SelInfo);
    }

    return fEventsProcessed;
}
RTDECL(int) RTSemEventMultiSignal(RTSEMEVENTMULTI hEventMultiSem)
{
    uint32_t fNew;
    uint32_t fOld;

    /*
     * Validate input.
     */
    PRTSEMEVENTMULTIINTERNAL pThis = (PRTSEMEVENTMULTIINTERNAL)hEventMultiSem;
    if (!pThis)
        return VERR_INVALID_PARAMETER;
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    rtR0SemEventMultiLnxRetain(pThis);

    /*
     * Signal the event object.  The compare-and-exchange loop deals with
     * racing RTSemEventMultiSignal calls (these should probably be
     * forbidden, but they are relatively easy to handle).
     */
    do
    {
        fNew = fOld = ASMAtomicUoReadU32(&pThis->fStateAndGen);
        fNew += 1 << RTSEMEVENTMULTILNX_GEN_SHIFT;
        fNew |= RTSEMEVENTMULTILNX_STATE_MASK;
    } while (!ASMAtomicCmpXchgU32(&pThis->fStateAndGen, fNew, fOld));

    wake_up_all(&pThis->Head);

    rtR0SemEventMultiLnxRelease(pThis);
    return VINF_SUCCESS;
}
/**
 * ISR handler.
 *
 * @return  BOOLEAN         Indicates whether the IRQ came from us (TRUE) or not (FALSE).
 * @param   pInterrupt      Interrupt that was triggered.
 * @param   pServiceContext Context specific pointer.
 */
BOOLEAN vboxguestwinIsrHandler(PKINTERRUPT pInterrupt, PVOID pServiceContext)
{
    PVBOXGUESTDEVEXT pDevExt = (PVBOXGUESTDEVEXT)pServiceContext;
    if (pDevExt == NULL)
        return FALSE;

    /*Log(("VBoxGuest::vboxguestwinGuestIsrHandler: pDevExt = 0x%p, pVMMDevMemory = 0x%p\n",
         pDevExt, pDevExt ? pDevExt->pVMMDevMemory : NULL));*/

    /* Enter the common ISR routine and do the actual work. */
    BOOLEAN fIRQTaken = VBoxGuestCommonISR(pDevExt);

    /* If we need to wake up some events we do that in a DPC to make
     * sure we're called at the right IRQL. */
    if (fIRQTaken)
    {
        Log(("VBoxGuest::vboxguestwinGuestIsrHandler: IRQ was taken! pInterrupt = 0x%p, pDevExt = 0x%p\n",
             pInterrupt, pDevExt));
        if (   ASMAtomicUoReadU32(&pDevExt->u32MousePosChangedSeq)
            || !RTListIsEmpty(&pDevExt->WakeUpList))
        {
            Log(("VBoxGuest::vboxguestwinGuestIsrHandler: Requesting DPC ...\n"));
            IoRequestDpc(pDevExt->win.s.pDeviceObject, pDevExt->win.s.pCurrentIrp, NULL);
        }
    }
    return fIRQTaken;
}
/**
 * Insert pending notification.
 *
 * @param   pVM     Pointer to the VM.
 * @param   pRec    Notification record to insert.
 */
static void remNotifyHandlerInsert(PVM pVM, PREMHANDLERNOTIFICATION pRec)
{
    /*
     * Fetch a free record.
     */
    uint32_t                cFlushes = 0;
    uint32_t                idxFree;
    PREMHANDLERNOTIFICATION pFree;
    do
    {
        idxFree = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
        if (idxFree == UINT32_MAX)
        {
            do
            {
                cFlushes++;
                Assert(cFlushes != 128);
                AssertFatal(cFlushes < _1M);
                VMMRZCallRing3NoCpu(pVM, VMMCALLRING3_REM_REPLAY_HANDLER_NOTIFICATIONS, 0);
                idxFree = ASMAtomicUoReadU32(&pVM->rem.s.idxFreeList);
            } while (idxFree == UINT32_MAX);
        }
        pFree = &pVM->rem.s.aHandlerNotifications[idxFree];
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxFreeList, pFree->idxNext, idxFree));

    /*
     * Copy the record.
     */
    pFree->enmKind = pRec->enmKind;
    pFree->u = pRec->u;

    /*
     * Insert it into the pending list.
     */
    uint32_t idxNext;
    do
    {
        idxNext = ASMAtomicUoReadU32(&pVM->rem.s.idxPendingList);
        ASMAtomicWriteU32(&pFree->idxNext, idxNext);
        ASMCompilerBarrier();
    } while (!ASMAtomicCmpXchgU32(&pVM->rem.s.idxPendingList, idxFree, idxNext));

    VM_FF_SET(pVM, VM_FF_REM_HANDLER_NOTIFY);
}
DWORD APIENTRY VBoxDispDDGetBltStatus(PDD_GETBLTSTATUSDATA lpGetBltStatus)
{
    PVBOXDISPDEV pDev = (PVBOXDISPDEV) lpGetBltStatus->lpDD->dhpdev;
    PVBOXVHWASURFDESC pDesc = (PVBOXVHWASURFDESC)lpGetBltStatus->lpDDSurface->lpGbl->dwReserved1;
    LOGF_ENTER();

    if (lpGetBltStatus->dwFlags == DDGBS_CANBLT)
    {
        lpGetBltStatus->ddRVal = DD_OK;
    }
    else /* DDGBS_ISBLTDONE */
    {
        if (pDesc)
        {
            if (   ASMAtomicUoReadU32(&pDesc->cPendingBltsSrc)
                || ASMAtomicUoReadU32(&pDesc->cPendingBltsDst))
            {
                VBoxDispVHWACommandCheckHostCmds(pDev);

                if (   ASMAtomicUoReadU32(&pDesc->cPendingBltsSrc)
                    || ASMAtomicUoReadU32(&pDesc->cPendingBltsDst))
                {
                    lpGetBltStatus->ddRVal = DDERR_WASSTILLDRAWING;
                }
                else
                {
                    lpGetBltStatus->ddRVal = DD_OK;
                }
            }
            else
            {
                lpGetBltStatus->ddRVal = DD_OK;
            }
        }
        else
        {
            WARN(("!pDesc"));
            lpGetBltStatus->ddRVal = DDERR_GENERIC;
        }
    }

    LOGF_LEAVE();
    return DDHAL_DRIVER_HANDLED;
}
/**
 * Poll function.
 *
 * This returns ready to read if the mouse pointer mode or the pointer position
 * has changed since last call to read.
 *
 * @returns 0 if no changes, POLLIN | POLLRDNORM if there are unseen changes.
 *
 * @param   pFile       The file structure.
 * @param   pPt         The poll table.
 *
 * @remarks This is probably not really used, X11 is said to use the fasync
 *          interface instead.
 */
static unsigned int vboxguestPoll(struct file *pFile, poll_table *pPt)
{
    PVBOXGUESTSESSION   pSession  = (PVBOXGUESTSESSION)pFile->private_data;
    uint32_t            u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
    unsigned int        fMask     = pSession->u32MousePosChangedSeq != u32CurSeq
                                  ? POLLIN | POLLRDNORM
                                  : 0;
    poll_wait(pFile, &g_PollEventQueue, pPt);
    return fMask;
}
DWORD APIENTRY VBoxDispDDGetFlipStatus(PDD_GETFLIPSTATUSDATA lpGetFlipStatus)
{
    PVBOXDISPDEV pDev = (PVBOXDISPDEV) lpGetFlipStatus->lpDD->dhpdev;
    PVBOXVHWASURFDESC pDesc = (PVBOXVHWASURFDESC)lpGetFlipStatus->lpDDSurface->lpGbl->dwReserved1;
    LOGF_ENTER();

    /* Can't flip if there's a flip pending, so the result is the same for DDGFS_CANFLIP/DDGFS_ISFLIPDONE. */
    if (pDesc)
    {
        if (   ASMAtomicUoReadU32(&pDesc->cPendingFlipsTarg)
            || ASMAtomicUoReadU32(&pDesc->cPendingFlipsCurr))
        {
            VBoxDispVHWACommandCheckHostCmds(pDev);

            if (   ASMAtomicUoReadU32(&pDesc->cPendingFlipsTarg)
                || ASMAtomicUoReadU32(&pDesc->cPendingFlipsCurr))
            {
                lpGetFlipStatus->ddRVal = DDERR_WASSTILLDRAWING;
            }
            else
            {
                lpGetFlipStatus->ddRVal = DD_OK;
            }
        }
        else
        {
            lpGetFlipStatus->ddRVal = DD_OK;
        }
    }
    else
    {
        WARN(("!pDesc"));
        lpGetFlipStatus->ddRVal = DDERR_GENERIC;
    }

    LOGF_LEAVE();
    return DDHAL_DRIVER_HANDLED;
}
/**
 * Driver read hook.
 *
 * @param   cookie      The session.
 * @param   position    The offset.
 * @param   data        Pointer to the data.
 * @param   numBytes    Where to store the number of bytes read.
 *
 * @return Haiku status code.
 */
static status_t vgdrvHaikuRead(void *cookie, off_t position, void *data, size_t *numBytes)
{
    PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)cookie;

    if (*numBytes == 0)
        return B_OK;

    uint32_t u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
    if (pSession->u32MousePosChangedSeq != u32CurSeq)
    {
        pSession->u32MousePosChangedSeq = u32CurSeq;
        *numBytes = 1;
        return B_OK;
    }

    *numBytes = 0;
    return B_OK;
}
static int VBoxGuestSolarisRead(dev_t Dev, struct uio *pUio, cred_t *pCred)
{
    LogFlow((DEVICE_NAME "::Read\n"));

    vboxguest_state_t *pState = ddi_get_soft_state(g_pVBoxGuestSolarisState, getminor(Dev));
    if (!pState)
    {
        Log((DEVICE_NAME "::Read: failed to get pState.\n"));
        return EFAULT;
    }

    PVBOXGUESTSESSION pSession = pState->pSession;
    uint32_t u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);
    if (pSession->u32MousePosChangedSeq != u32CurSeq)
        pSession->u32MousePosChangedSeq = u32CurSeq;
    return 0;
}
/**
 * Read to go with our poll/fasync response.
 *
 * @returns 0 if nothing has changed, 1 if a change was consumed, or -EINVAL.
 *
 * @param   pFile       The file structure.
 * @param   pbBuf       The buffer to read into.
 * @param   cbRead      The max number of bytes to read.
 * @param   poff        The current file position.
 *
 * @remarks This is probably not really used as X11 lets the driver do its own
 *          event reading. The poll condition is therefore also cleared when we
 *          see VMMDevReq_GetMouseStatus in VbgdCommonIoCtl_VMMRequest.
 */
static ssize_t vboxguestRead(struct file *pFile, char *pbBuf, size_t cbRead, loff_t *poff)
{
    PVBOXGUESTSESSION   pSession  = (PVBOXGUESTSESSION)pFile->private_data;
    uint32_t            u32CurSeq = ASMAtomicUoReadU32(&g_DevExt.u32MousePosChangedSeq);

    if (*poff != 0)
        return -EINVAL;

    /*
     * Fake a single byte read if we're not up to date with the current mouse position.
     */
    if (   pSession->u32MousePosChangedSeq != u32CurSeq
        && cbRead > 0)
    {
        pSession->u32MousePosChangedSeq = u32CurSeq;
        pbBuf[0] = 0;
        return 1;
    }
    return 0;
}
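/*
 * Illustrative user-space sketch of how the poll/read pair above might be
 * consumed.  This is not code from the guest additions: the device path
 * "/dev/vboxguest" and the single-byte interpretation are assumptions based
 * on the driver comments above; only standard poll(2)/read(2) are used.
 */
#include <fcntl.h>
#include <poll.h>
#include <stdio.h>
#include <unistd.h>

int main(void)
{
    int fd = open("/dev/vboxguest", O_RDONLY); /* assumed device node */
    if (fd < 0)
        return 1;

    struct pollfd pfd = { .fd = fd, .events = POLLIN };
    if (poll(&pfd, 1, -1) > 0 && (pfd.revents & POLLIN))
    {
        char bDummy;
        /* The driver fakes a single-byte read to clear the poll condition. */
        if (read(fd, &bDummy, 1) == 1)
            printf("mouse position changed\n");
    }

    close(fd);
    return 0;
}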
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventMultiWaitEx.
 * @param   uTimeout        See RTSemEventMultiWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventMultiLnxWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                    PCRTLOCKVALSRCPOS pSrcPos)
{
    uint32_t fOrgStateAndGen;
    int      rc;

    /*
     * Validate the input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    rtR0SemEventMultiLnxRetain(pThis);

    /*
     * Is the event already signalled or do we have to wait?
     */
    fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    if (fOrgStateAndGen & RTSEMEVENTMULTILNX_STATE_MASK)
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait.
         */
        RTR0SEMLNXWAIT Wait;
        rc = rtR0SemLnxWaitInit(&Wait, fFlags, uTimeout, &pThis->Head);
        if (RT_SUCCESS(rc))
        {
            IPRT_DEBUG_SEMS_STATE(pThis, 'E');
            for (;;)
            {
                /* The destruction test. */
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                    rc = VERR_SEM_DESTROYED;
                else
                {
                    rtR0SemLnxWaitPrepare(&Wait);

                    /* Check the exit conditions. */
                    if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                        rc = VERR_SEM_DESTROYED;
                    else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
                        rc = VINF_SUCCESS;
                    else if (rtR0SemLnxWaitHasTimedOut(&Wait))
                        rc = VERR_TIMEOUT;
                    else if (rtR0SemLnxWaitWasInterrupted(&Wait))
                        rc = VERR_INTERRUPTED;
                    else
                    {
                        /* Do the wait and then recheck the conditions. */
                        rtR0SemLnxWaitDoIt(&Wait);
                        continue;
                    }
                }
                break;
            }
            rtR0SemLnxWaitDelete(&Wait);
            IPRT_DEBUG_SEMS_STATE_RC(pThis, 'E', rc);
        }
    }

    rtR0SemEventMultiLnxRelease(pThis);
    return rc;
}
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventMultiWaitEx.
 * @param   uTimeout        See RTSemEventMultiWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventMultiDarwinWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                       PCRTLOCKVALSRCPOS pSrcPos)
{
    /*
     * Validate input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_HANDLE);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC,
                    ("pThis=%p u32Magic=%#x\n", pThis, pThis->u32Magic),
                    VERR_INVALID_HANDLE);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    if (uTimeout != 0 || (fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        RT_ASSERT_PREEMPTIBLE();

    rtR0SemEventMultiDarwinRetain(pThis);
    lck_spin_lock(pThis->pSpinlock);

    /*
     * Is the event already signalled or do we have to wait?
     */
    int rc;
    uint32_t const fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    if (fOrgStateAndGen & RTSEMEVENTMULTIDARWIN_STATE_MASK)
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait. So, we'll need to convert the timeout and figure
         * out if it's indefinite or not.
         */
        uint64_t uNsAbsTimeout = 1;
        if (!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE))
        {
            if (fFlags & RTSEMWAIT_FLAGS_MILLISECS)
                uTimeout = uTimeout < UINT64_MAX / UINT32_C(1000000) * UINT32_C(1000000)
                         ? uTimeout * UINT32_C(1000000)
                         : UINT64_MAX;
            if (uTimeout == UINT64_MAX)
                fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
            else
            {
                uint64_t u64Now;
                if (fFlags & RTSEMWAIT_FLAGS_RELATIVE)
                {
                    if (uTimeout != 0)
                    {
                        u64Now = RTTimeSystemNanoTS();
                        uNsAbsTimeout = u64Now + uTimeout;
                        if (uNsAbsTimeout < u64Now) /* overflow */
                            fFlags |= RTSEMWAIT_FLAGS_INDEFINITE;
                    }
                }
                else
                {
                    uNsAbsTimeout = uTimeout;
                    u64Now        = RTTimeSystemNanoTS();
                    uTimeout      = u64Now < uTimeout ? uTimeout - u64Now : 0;
                }
            }
        }

        if (   !(fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
            && uTimeout == 0)
        {
            /*
             * Poll call, we already checked the condition above so no need to
             * wait for anything.
             */
            rc = VERR_TIMEOUT;
        }
        else
        {
            for (;;)
            {
                /*
                 * Do the actual waiting.
                 */
                ASMAtomicWriteBool(&pThis->fHaveBlockedThreads, true);
                wait_interrupt_t fInterruptible = fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE ? THREAD_ABORTSAFE : THREAD_UNINT;
                wait_result_t    rcWait;
                if (fFlags & RTSEMWAIT_FLAGS_INDEFINITE)
                    rcWait = lck_spin_sleep(pThis->pSpinlock, LCK_SLEEP_DEFAULT, (event_t)pThis, fInterruptible);
                else
                {
                    uint64_t u64AbsTime;
                    nanoseconds_to_absolutetime(uNsAbsTimeout, &u64AbsTime);
                    rcWait = lck_spin_sleep_deadline(pThis->pSpinlock, LCK_SLEEP_DEFAULT,
                                                     (event_t)pThis, fInterruptible, u64AbsTime);
                }

                /*
                 * Deal with the wait result.
                 */
                if (RT_LIKELY(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC))
                {
                    switch (rcWait)
                    {
                        case THREAD_AWAKENED:
                            if (RT_LIKELY(ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen))
                                rc = VINF_SUCCESS;
                            else if (fFlags & RTSEMWAIT_FLAGS_INTERRUPTIBLE)
                                rc = VERR_INTERRUPTED;
                            else
                                continue; /* Seen this happen after fork/exec/something. */
                            break;

                        case THREAD_TIMED_OUT:
                            Assert(!(fFlags & RTSEMWAIT_FLAGS_INDEFINITE));
                            rc = VERR_TIMEOUT;
                            break;

                        case THREAD_INTERRUPTED:
                            Assert(fInterruptible != THREAD_UNINT);
                            rc = VERR_INTERRUPTED;
                            break;

                        case THREAD_RESTART:
                            AssertMsg(pThis->u32Magic == ~RTSEMEVENTMULTI_MAGIC, ("%#x\n", pThis->u32Magic));
                            rc = VERR_SEM_DESTROYED;
                            break;

                        default:
                            AssertMsgFailed(("rcWait=%d\n", rcWait));
                            rc = VERR_INTERNAL_ERROR_3;
                            break;
                    }
                }
                else
                    rc = VERR_SEM_DESTROYED;
                break;
            }
        }
    }

    lck_spin_unlock(pThis->pSpinlock);
    rtR0SemEventMultiDarwinRelease(pThis);
    return rc;
}
/* Lock specified area of surface */
DWORD APIENTRY VBoxDispDDLock(PDD_LOCKDATA lpLock)
{
    PVBOXDISPDEV pDev = (PVBOXDISPDEV) lpLock->lpDD->dhpdev;
    LOGF_ENTER();

    DD_SURFACE_LOCAL *pSurf = lpLock->lpDDSurface;

    lpLock->ddRVal = DD_OK;

#if 0
#ifdef VBOX_WITH_VIDEOHWACCEL
    if (pDev->vhwa.bEnabled)
    {
        PVBOXVHWASURFDESC pDesc = (PVBOXVHWASURFDESC) pSurf->lpGbl->dwReserved1;
        RECTL tmpRect, *pRect;

        if (!pDesc)
        {
            WARN(("!pDesc, memory overwrite somewhere?"));
            lpLock->ddRVal = DDERR_GENERIC;
            return DDHAL_DRIVER_HANDLED;
        }

        /* Check if host is still processing drawing commands */
        if (   ASMAtomicUoReadU32(&pDesc->cPendingBltsSrc)
            || ASMAtomicUoReadU32(&pDesc->cPendingFlipsCurr)
            || ASMAtomicUoReadU32(&pDesc->cPendingBltsDst)
            || ASMAtomicUoReadU32(&pDesc->cPendingFlipsTarg))
        {
            VBoxDispVHWACommandCheckHostCmds(pDev);

            if (   ASMAtomicUoReadU32(&pDesc->cPendingBltsSrc)
                || ASMAtomicUoReadU32(&pDesc->cPendingFlipsCurr)
                || ASMAtomicUoReadU32(&pDesc->cPendingBltsDst)
                || ASMAtomicUoReadU32(&pDesc->cPendingFlipsTarg))
            {
                lpLock->ddRVal = DDERR_WASSTILLDRAWING;
                return DDHAL_DRIVER_HANDLED;
            }
        }

        if (lpLock->bHasRect)
        {
            pRect = &lpLock->rArea;
        }
        else
        {
            tmpRect.left   = 0;
            tmpRect.top    = 0;
            tmpRect.right  = pSurf->lpGbl->wWidth - 1;
            tmpRect.bottom = pSurf->lpGbl->wHeight - 1;
            pRect = &tmpRect;
        }

        if (lpLock->dwFlags & DDLOCK_DISCARDCONTENTS)
        {
            VBoxDispVHWARegionTrySubstitute(&pDesc->NonupdatedMemRegion, pRect);
            VBoxDispVHWARegionAdd(&pDesc->UpdatedMemRegion, pRect);
        }
        else if (!VBoxDispVHWARegionIntersects(&pDesc->NonupdatedMemRegion, pRect))
        {
            VBoxDispVHWARegionAdd(&pDesc->UpdatedMemRegion, pRect);
        }
        else
        {
            VBOXVHWACMD *pCmd;
            pCmd = VBoxDispVHWACommandCreate(pDev, VBOXVHWACMD_TYPE_SURF_LOCK, sizeof(VBOXVHWACMD_SURF_LOCK));

            if (pCmd)
            {
                VBOXVHWACMD_SURF_LOCK *pBody = VBOXVHWACMD_BODY(pCmd, VBOXVHWACMD_SURF_LOCK);
                memset(pBody, 0, sizeof(VBOXVHWACMD_SURF_LOCK));

                pBody->u.in.offSurface = VBoxDispVHWAVramOffsetFromPDEV(pDev, pSurf->lpGbl->fpVidMem);

                VBoxDispVHWAFromRECTL(&pBody->u.in.rect, &pDesc->NonupdatedMemRegion.Rect);
                pBody->u.in.rectValid = 1;

                pBody->u.in.hSurf = pDesc->hHostHandle;

                /* wait for the surface to be locked and memory buffer updated */
                VBoxDispVHWACommandSubmit(pDev, pCmd);
                VBOX_WARNRC(pCmd->rc);

                VBoxDispVHWACommandRelease(pDev, pCmd);
                VBoxDispVHWARegionClear(&pDesc->NonupdatedMemRegion);
            }
            else
            {
                WARN(("VBoxDispVHWACommandCreate failed!"));
                lpLock->ddRVal = DDERR_GENERIC;
            }
        }

        return DDHAL_DRIVER_NOTHANDLED;
    }
#endif /*VBOX_WITH_VIDEOHWACCEL*/
#endif

    /* We only care about the primary surface, since we have to report dirty rectangles to the host in DDUnlock. */
    if (pSurf->ddsCaps.dwCaps & DDSCAPS_PRIMARYSURFACE)
    {
        pDev->ddpsLock.bLocked = TRUE;

        if (lpLock->bHasRect)
        {
            pDev->ddpsLock.rect = lpLock->rArea;
        }
        else
        {
            pDev->ddpsLock.rect.left   = 0;
            pDev->ddpsLock.rect.top    = 0;
            pDev->ddpsLock.rect.right  = pDev->mode.ulWidth;
            pDev->ddpsLock.rect.bottom = pDev->mode.ulHeight;
        }
    }

    LOGF_LEAVE();
    return DDHAL_DRIVER_NOTHANDLED;
}
RTDECL(int) RTPowerSignalEvent(RTPOWEREVENT enmEvent)
{
    PRTPOWERNOTIFYREG   pCur;
    RTSPINLOCK          hSpinlock;

    /*
     * This is a little bit tricky as we cannot be holding the spinlock
     * while calling the callback. This means that the list might change
     * while we're walking it, and that multiple events might be running
     * concurrently (depending on the OS).
     *
     * So, the first measure is to employ a 32-bit mask for each
     * record where we'll use a bit that rotates for each call to
     * this function to indicate which records have been
     * processed. This will take care of both changes to the list
     * and a reasonable amount of concurrent events.
     *
     * In order to avoid having to restart the list walk for every
     * callback we make, we'll make use of a list generation number that is
     * incremented every time the list is changed. So, if it remains
     * unchanged over a callback we can safely continue the iteration.
     */
    uint32_t iDone = ASMAtomicIncU32(&g_iRTPowerDoneBit);
    iDone %= RT_SIZEOFMEMB(RTPOWERNOTIFYREG, bmDone) * 8;

    hSpinlock = g_hRTPowerNotifySpinLock;
    if (hSpinlock == NIL_RTSPINLOCK)
        return VERR_ACCESS_DENIED;
    RTSpinlockAcquire(hSpinlock);

    /* Clear the bit. */
    for (pCur = g_pRTPowerCallbackHead; pCur; pCur = pCur->pNext)
        ASMAtomicBitClear(&pCur->bmDone[0], iDone);

    /* Iterate the records and perform the callbacks. */
    do
    {
        uint32_t const iGeneration = ASMAtomicUoReadU32(&g_iRTPowerGeneration);
        pCur = g_pRTPowerCallbackHead;
        while (pCur)
        {
            if (!ASMAtomicBitTestAndSet(&pCur->bmDone[0], iDone))
            {
                PFNRTPOWERNOTIFICATION pfnCallback = pCur->pfnCallback;
                void *pvUser = pCur->pvUser;
                pCur = pCur->pNext;
                RTSpinlockRelease(g_hRTPowerNotifySpinLock);

                pfnCallback(enmEvent, pvUser);

                /* carefully reacquire the lock here, see RTR0MpNotificationTerm(). */
                hSpinlock = g_hRTPowerNotifySpinLock;
                if (hSpinlock == NIL_RTSPINLOCK)
                    return VERR_ACCESS_DENIED;
                RTSpinlockAcquire(hSpinlock);
                if (ASMAtomicUoReadU32(&g_iRTPowerGeneration) != iGeneration)
                    break;
            }
            else
                pCur = pCur->pNext;
        }
    } while (pCur);

    RTSpinlockRelease(hSpinlock);
    return VINF_SUCCESS;
}
DWORD APIENTRY VBoxDispDDFlip(PDD_FLIPDATA lpFlip)
{
    PVBOXDISPDEV pDev = (PVBOXDISPDEV) lpFlip->lpDD->dhpdev;
    LOGF_ENTER();

    DD_SURFACE_LOCAL *pCurrSurf = lpFlip->lpSurfCurr;
    DD_SURFACE_LOCAL *pTargSurf = lpFlip->lpSurfTarg;
    PVBOXVHWASURFDESC pCurrDesc = (PVBOXVHWASURFDESC) pCurrSurf->lpGbl->dwReserved1;
    PVBOXVHWASURFDESC pTargDesc = (PVBOXVHWASURFDESC) pTargSurf->lpGbl->dwReserved1;

    if (pCurrDesc && pTargDesc)
    {
        if (   ASMAtomicUoReadU32(&pCurrDesc->cPendingFlipsTarg)
            || ASMAtomicUoReadU32(&pCurrDesc->cPendingFlipsCurr)
            || ASMAtomicUoReadU32(&pTargDesc->cPendingFlipsTarg)
            || ASMAtomicUoReadU32(&pTargDesc->cPendingFlipsCurr))
        {
            VBoxDispVHWACommandCheckHostCmds(pDev);

            if (   ASMAtomicUoReadU32(&pCurrDesc->cPendingFlipsTarg)
                || ASMAtomicUoReadU32(&pCurrDesc->cPendingFlipsCurr)
                || ASMAtomicUoReadU32(&pTargDesc->cPendingFlipsTarg)
                || ASMAtomicUoReadU32(&pTargDesc->cPendingFlipsCurr))
            {
                lpFlip->ddRVal = DDERR_WASSTILLDRAWING;
                return DDHAL_DRIVER_HANDLED;
            }
        }

        VBOXVHWACMD *pCmd;
        pCmd = VBoxDispVHWACommandCreate(pDev, VBOXVHWACMD_TYPE_SURF_FLIP, sizeof(VBOXVHWACMD_SURF_FLIP));

        if (pCmd)
        {
            VBOXVHWACMD_SURF_FLIP *pBody = VBOXVHWACMD_BODY(pCmd, VBOXVHWACMD_SURF_FLIP);
            memset(pBody, 0, sizeof(VBOXVHWACMD_SURF_FLIP));

            pBody->u.in.offCurrSurface = VBoxDispVHWAVramOffsetFromPDEV(pDev, pCurrSurf->lpGbl->fpVidMem);
            pBody->u.in.offTargSurface = VBoxDispVHWAVramOffsetFromPDEV(pDev, pTargSurf->lpGbl->fpVidMem);

            pBody->u.in.hTargSurf = pTargDesc->hHostHandle;
            pBody->u.in.hCurrSurf = pCurrDesc->hHostHandle;
            pBody->TargGuestSurfInfo = (uint64_t)pTargDesc;
            pBody->CurrGuestSurfInfo = (uint64_t)pCurrDesc;

            pTargDesc->bVisible = pCurrDesc->bVisible;
            pCurrDesc->bVisible = false;

            ASMAtomicIncU32(&pCurrDesc->cPendingFlipsCurr);
            ASMAtomicIncU32(&pTargDesc->cPendingFlipsTarg);
#ifdef DEBUG
            ASMAtomicIncU32(&pCurrDesc->cFlipsCurr);
            ASMAtomicIncU32(&pTargDesc->cFlipsTarg);
#endif

            if (pTargDesc->UpdatedMemRegion.bValid)
            {
                pBody->u.in.xUpdatedTargMemValid = 1;
                VBoxDispVHWAFromRECTL(&pBody->u.in.xUpdatedTargMemRect, &pTargDesc->UpdatedMemRegion.Rect);
                VBoxDispVHWARegionClear(&pTargDesc->UpdatedMemRegion);
            }

            VBoxDispVHWACommandSubmitAsynch(pDev, pCmd, VBoxDispVHWASurfFlipCompletion, NULL);

            lpFlip->ddRVal = DD_OK;
        }
        else
        {
            WARN(("VBoxDispVHWACommandCreate failed!"));
            lpFlip->ddRVal = DDERR_GENERIC;
        }
    }
    else
    {
        WARN(("!(pCurrDesc && pTargDesc)"));
        lpFlip->ddRVal = DDERR_GENERIC;
    }

    LOGF_LEAVE();
    return DDHAL_DRIVER_HANDLED;
}
/**
 * Worker for RTSemEventMultiWaitEx and RTSemEventMultiWaitExDebug.
 *
 * @returns VBox status code.
 * @param   pThis           The event semaphore.
 * @param   fFlags          See RTSemEventMultiWaitEx.
 * @param   uTimeout        See RTSemEventMultiWaitEx.
 * @param   pSrcPos         The source code position of the wait.
 */
static int rtR0SemEventMultiSolWait(PRTSEMEVENTMULTIINTERNAL pThis, uint32_t fFlags, uint64_t uTimeout,
                                    PCRTLOCKVALSRCPOS pSrcPos)
{
    uint32_t fOrgStateAndGen;
    int      rc;

    /*
     * Validate the input.
     */
    AssertPtrReturn(pThis, VERR_INVALID_PARAMETER);
    AssertMsgReturn(pThis->u32Magic == RTSEMEVENTMULTI_MAGIC, ("%p u32Magic=%RX32\n", pThis, pThis->u32Magic), VERR_INVALID_PARAMETER);
    AssertReturn(RTSEMWAIT_FLAGS_ARE_VALID(fFlags), VERR_INVALID_PARAMETER);
    rtR0SemEventMultiSolRetain(pThis);
    mutex_enter(&pThis->Mtx); /* this could be moved down to the else, but play safe for now. */

    /*
     * Is the event already signalled or do we have to wait?
     */
    fOrgStateAndGen = ASMAtomicUoReadU32(&pThis->fStateAndGen);
    if (fOrgStateAndGen & RTSEMEVENTMULTISOL_STATE_MASK)
        rc = VINF_SUCCESS;
    else
    {
        /*
         * We have to wait.
         */
        RTR0SEMSOLWAIT Wait;
        rc = rtR0SemSolWaitInit(&Wait, fFlags, uTimeout);
        if (RT_SUCCESS(rc))
        {
            for (;;)
            {
                /* The destruction test. */
                if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                    rc = VERR_SEM_DESTROYED;
                else
                {
                    /* Check the exit conditions. */
                    if (RT_UNLIKELY(pThis->u32Magic != RTSEMEVENTMULTI_MAGIC))
                        rc = VERR_SEM_DESTROYED;
                    else if (ASMAtomicUoReadU32(&pThis->fStateAndGen) != fOrgStateAndGen)
                        rc = VINF_SUCCESS;
                    else if (rtR0SemSolWaitHasTimedOut(&Wait))
                        rc = VERR_TIMEOUT;
                    else if (rtR0SemSolWaitWasInterrupted(&Wait))
                        rc = VERR_INTERRUPTED;
                    else
                    {
                        /* Do the wait and then recheck the conditions. */
                        rtR0SemSolWaitDoIt(&Wait, &pThis->Cnd, &pThis->Mtx, &pThis->fStateAndGen, fOrgStateAndGen);
                        continue;
                    }
                }
                break;
            }
            rtR0SemSolWaitDelete(&Wait);
        }
    }

    mutex_exit(&pThis->Mtx);
    rtR0SemEventMultiSolRelease(pThis);
    return rc;
}
/**
 * Gets the enmState member atomically.
 *
 * Used for all reads.
 *
 * @returns The enmState value.
 * @param   pThis           The instance.
 */
DECLINLINE(VBOXNETADPSTATE) vboxNetAdpGetState(PVBOXNETADP pThis)
{
    return (VBOXNETADPSTATE)ASMAtomicUoReadU32((uint32_t volatile *)&pThis->enmState);
}
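/*
 * For context, a matching setter would go through the same uint32_t view of
 * the enum so that readers using vboxNetAdpGetState() observe it atomically.
 * The following is a minimal sketch of such companions; the names
 * vboxNetAdpExampleSetState and vboxNetAdpExampleSetStateIf are illustrative
 * assumptions, not the driver's actual helpers.
 */
DECLINLINE(void) vboxNetAdpExampleSetState(PVBOXNETADP pThis, VBOXNETADPSTATE enmNewState)
{
    /* Ordered atomic store, readable via vboxNetAdpGetState(). */
    ASMAtomicWriteU32((uint32_t volatile *)&pThis->enmState, (uint32_t)enmNewState);
}

DECLINLINE(bool) vboxNetAdpExampleSetStateIf(PVBOXNETADP pThis, VBOXNETADPSTATE enmNewState, VBOXNETADPSTATE enmOldState)
{
    /* Only transition if nobody changed the state in the meantime. */
    return ASMAtomicCmpXchgU32((uint32_t volatile *)&pThis->enmState, (uint32_t)enmNewState, (uint32_t)enmOldState);
}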
int main()
{
    RTR3Init();

    /*
     * Create a ping pong and kick off a second thread which we'll
     * exchange TSTSEMPINGPONG_ITERATIONS messages with.
     */
    RTPINGPONG PingPong;
    int rc = RTSemPingPongInit(&PingPong);
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstSemPingPong: FATAL ERROR - RTSemPingPongInit -> %Rrc\n", rc);
        return 1;
    }

    RTTHREAD hThread;
    rc = RTThreadCreate(&hThread, tstSemPingPongThread, &PingPong, 0, RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "PONG");
    if (RT_FAILURE(rc))
    {
        RTPrintf("tstSemPingPong: FATAL ERROR - RTThreadCreate -> %Rrc\n", rc);
        return 1;
    }

    RTPrintf("tstSemPingPong: TESTING - %u iterations...\n", TSTSEMPINGPONG_ITERATIONS);
    for (uint32_t i = 0; i < TSTSEMPINGPONG_ITERATIONS; i++)
    {
        if (!RTSemPingIsSpeaker(&PingPong))
        {
            ASMAtomicIncU32(&g_cErrors);
            RTPrintf("tstSemPingPong: ERROR - RTSemPingIsSpeaker returned false before RTSemPing.\n");
        }

        rc = RTSemPing(&PingPong);
        if (RT_FAILURE(rc))
        {
            ASMAtomicIncU32(&g_cErrors);
            RTPrintf("tstSemPingPong: ERROR - RTSemPing -> %Rrc\n", rc);
            break;
        }

        if (!RTSemPingShouldWait(&PingPong))
        {
            ASMAtomicIncU32(&g_cErrors);
            RTPrintf("tstSemPingPong: ERROR - RTSemPingShouldWait returned false before RTSemPingWait.\n");
        }

        rc = RTSemPingWait(&PingPong, RT_INDEFINITE_WAIT);
        if (RT_FAILURE(rc))
        {
            ASMAtomicIncU32(&g_cErrors);
            RTPrintf("tstSemPingPong: ERROR - RTSemPingWait -> %Rrc\n", rc);
            break;
        }
    }

    int rcThread;
    rc = RTThreadWait(hThread, 5000, &rcThread);
    if (RT_FAILURE(rc))
    {
        ASMAtomicIncU32(&g_cErrors);
        RTPrintf("tstSemPingPong: ERROR - RTThreadWait -> %Rrc\n", rc);
    }

    rc = RTSemPingPongDelete(&PingPong);
    if (RT_FAILURE(rc))
    {
        ASMAtomicIncU32(&g_cErrors);
        RTPrintf("tstSemPingPong: ERROR - RTSemPingPongDelete -> %Rrc\n", rc);
    }

    /*
     * Summary.
     */
    uint32_t cErrors = ASMAtomicUoReadU32(&g_cErrors);
    if (cErrors)
        RTPrintf("tstSemPingPong: FAILED - %u errors\n", cErrors);
    else
        RTPrintf("tstSemPingPong: SUCCESS\n");
    return cErrors ? 1 : 0;
}