static int vgdrvFreeBSDOpen(struct cdev *pDev, int fOpen, struct thread *pTd) { int rc; PVBOXGUESTSESSION pSession; LogFlow(("vgdrvFreeBSDOpen:\n")); /* * Try grab it (we don't grab the giant, remember). */ if (!ASMAtomicCmpXchgPtr(&pDev->si_drv1, (void *)0x42, NULL)) return EBUSY; /* * Create a new session. */ rc = VGDrvCommonCreateUserSession(&g_DevExt, &pSession); if (RT_SUCCESS(rc)) { if (ASMAtomicCmpXchgPtr(&pDev->si_drv1, pSession, (void *)0x42)) { Log(("vgdrvFreeBSDOpen: success - g_DevExt=%p pSession=%p rc=%d pid=%d\n", &g_DevExt, pSession, rc, (int)RTProcSelf())); ASMAtomicIncU32(&cUsers); return 0; } VGDrvCommonCloseSession(&g_DevExt, pSession); } LogRel(("vgdrvFreeBSDOpen: failed. rc=%d\n", rc)); return RTErrConvertToErrno(rc); }
static int VBoxDrvFreeBSDOpen(struct cdev *pDev, int fOpen, struct thread *pTd, int iFd) { PSUPDRVSESSION pSession; int rc; #if __FreeBSD_version < 800062 Log(("VBoxDrvFreeBSDOpen: fOpen=%#x iUnit=%d\n", fOpen, minor2unit(minor(pDev)))); #else Log(("VBoxDrvFreeBSDOpen: fOpen=%#x iUnit=%d\n", fOpen, minor(dev2udev(pDev)))); #endif /* * Let's be a bit picky about the flags... */ if (fOpen != (FREAD|FWRITE /*=O_RDWR*/)) { Log(("VBoxDrvFreeBSDOpen: fOpen=%#x expected %#x\n", fOpen, O_RDWR)); return EINVAL; } /* * Try grab it (we don't grab the giant, remember). */ if (!ASMAtomicCmpXchgPtr(&pDev->si_drv1, (void *)0x42, NULL)) return EBUSY; /* * Create a new session. */ rc = supdrvCreateSession(&g_VBoxDrvFreeBSDDevExt, true /* fUser */, &pSession); if (RT_SUCCESS(rc)) { /** @todo get (r)uid and (r)gid. pSession->Uid = stuff; pSession->Gid = stuff; */ if (ASMAtomicCmpXchgPtr(&pDev->si_drv1, pSession, (void *)0x42)) { ASMAtomicIncU32(&g_cUsers); return 0; } OSDBGPRINT(("VBoxDrvFreeBSDOpen: si_drv1=%p, expected 0x42!\n", pDev->si_drv1)); supdrvCloseSession(&g_VBoxDrvFreeBSDDevExt, pSession); } return RTErrConvertToErrno(rc); }
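Both open handlers above use the same claim-then-publish idiom: one CAS swings si_drv1 from NULL to a placeholder token (0x42) so concurrent opens fail fast with EBUSY, and a second CAS replaces the placeholder with the real session once it has been created. Below is a minimal stand-alone sketch of that idiom using C11 atomics in place of ASMAtomicCmpXchgPtr; the names (g_Slot, MYSESSION, my_open) are illustrative, not part of the drivers.

#include <errno.h>
#include <stdatomic.h>
#include <stdlib.h>

typedef struct MYSESSION { int iDummy; } MYSESSION;

static _Atomic(void *) g_Slot;              /* plays the role of pDev->si_drv1 */
#define CLAIM_TOKEN ((void *)0x42)          /* placeholder while the session is being built */

static int my_open(void)
{
    void *pvExpected = NULL;
    /* Claim the slot: only one opener can swing NULL -> CLAIM_TOKEN. */
    if (!atomic_compare_exchange_strong(&g_Slot, &pvExpected, CLAIM_TOKEN))
        return EBUSY;

    MYSESSION *pSession = (MYSESSION *)calloc(1, sizeof(*pSession));
    if (!pSession)
    {
        atomic_store(&g_Slot, NULL);        /* give the slot back on allocation failure */
        return ENOMEM;
    }

    /* Publish the real session, replacing the claim token. */
    pvExpected = CLAIM_TOKEN;
    if (atomic_compare_exchange_strong(&g_Slot, &pvExpected, pSession))
        return 0;

    free(pSession);                         /* unexpected: somebody else touched the slot */
    return EBUSY;
}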
/** * Allocates and acquires the lock for the stream. * * @returns IPRT status. * @param pStream The stream (valid). */ static int rtStrmAllocLock(PRTSTREAM pStream) { Assert(pStream->pCritSect == NULL); PRTCRITSECT pCritSect = (PRTCRITSECT)RTMemAlloc(sizeof(*pCritSect)); if (!pCritSect) return VERR_NO_MEMORY; /* The native stream locks are normally not recursive. */ int rc = RTCritSectInitEx(pCritSect, RTCRITSECT_FLAGS_NO_NESTING, NIL_RTLOCKVALCLASS, RTLOCKVAL_SUB_CLASS_NONE, "RTSemSpinMutex"); if (RT_SUCCESS(rc)) { rc = RTCritSectEnter(pCritSect); if (RT_SUCCESS(rc)) { if (RT_LIKELY(ASMAtomicCmpXchgPtr(&pStream->pCritSect, pCritSect, NULL))) return VINF_SUCCESS; RTCritSectLeave(pCritSect); } RTCritSectDelete(pCritSect); } RTMemFree(pCritSect); /* Handle the lost race case... */ pCritSect = ASMAtomicReadPtrT(&pStream->pCritSect, PRTCRITSECT); if (pCritSect) return RTCritSectEnter(pCritSect); return rc; }
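rtStrmAllocLock is the usual lazy-initialization dance: build the lock locally, publish it with a CAS on the still-NULL slot, and if another thread won the race, discard the local copy and enter the published instance instead. A small sketch of the same pattern, under the assumption that C11 atomics and a pthread mutex stand in for the IPRT critical section API (all names are illustrative):

#include <pthread.h>
#include <stdatomic.h>
#include <stdlib.h>

static _Atomic(pthread_mutex_t *) g_pLazyLock;   /* plays the role of pStream->pCritSect */

/* Returns the shared lock, creating it on first use; NULL on allocation failure. */
static pthread_mutex_t *lazyLockGet(void)
{
    pthread_mutex_t *pLock = atomic_load(&g_pLazyLock);
    if (pLock)
        return pLock;

    pLock = (pthread_mutex_t *)malloc(sizeof(*pLock));
    if (!pLock)
        return NULL;
    pthread_mutex_init(pLock, NULL);

    pthread_mutex_t *pExpected = NULL;
    if (atomic_compare_exchange_strong(&g_pLazyLock, &pExpected, pLock))
        return pLock;                            /* we published our instance */

    /* Lost the race: discard ours and use the winner's instance. */
    pthread_mutex_destroy(pLock);
    free(pLock);
    return pExpected;                            /* CAS failure left the current value here */
}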
/** * Closes an IDC connection established by SUPR0IdcOpen. * * @returns VBox status code. * @param pHandle The IDC handle. */ SUPR0DECL(int) SUPR0IdcClose(PSUPDRVIDCHANDLE pHandle) { SUPDRVIDCREQHDR Req; int rc; /* * Catch closed handles and check that the session is valid. */ AssertPtrReturn(pHandle, VERR_INVALID_POINTER); if (!pHandle->s.pSession) return VERR_INVALID_HANDLE; AssertPtrReturn(pHandle->s.pSession, VERR_INVALID_HANDLE); /* * Create the request and hand it to the OS specific code. */ Req.cb = sizeof(Req); Req.rc = VERR_WRONG_ORDER; Req.pSession = pHandle->s.pSession; rc = supR0IdcNativeClose(pHandle, &Req); if (RT_SUCCESS(rc)) { pHandle->s.pSession = NULL; ASMAtomicCmpXchgPtr(&g_pMainHandle, NULL, pHandle); } return rc; }
/** * Close a file device previously opened by VBoxDrvFreeBSDOpen. * * @returns 0 on success. * @param pDev The device. * @param fFile The file descriptor flags. * @param DevType The device type (CHR). * @param pTd The calling thread. */ static int VBoxDrvFreeBSDClose(struct cdev *pDev, int fFile, int DevType, struct thread *pTd) { PSUPDRVSESSION pSession = (PSUPDRVSESSION)pDev->si_drv1; #if __FreeBSD_version < 800062 Log(("VBoxDrvFreeBSDClose: fFile=%#x iUnit=%d pSession=%p\n", fFile, minor2unit(minor(pDev)), pSession)); #else Log(("VBoxDrvFreeBSDClose: fFile=%#x iUnit=%d pSession=%p\n", fFile, minor(dev2udev(pDev)), pSession)); #endif /* * Close the session if it's still hanging on to the device... */ if (VALID_PTR(pSession)) { supdrvCloseSession(&g_VBoxDrvFreeBSDDevExt, pSession); if (!ASMAtomicCmpXchgPtr(&pDev->si_drv1, NULL, pSession)) OSDBGPRINT(("VBoxDrvFreeBSDClose: si_drv1=%p expected %p!\n", pDev->si_drv1, pSession)); ASMAtomicDecU32(&g_cUsers); /* Don't use destroy_dev here because it may sleep resulting in a hanging user process. */ destroy_dev_sched(pDev); } else OSDBGPRINT(("VBoxDrvFreeBSDClose: si_drv1=%p!\n", pSession)); return 0; }
static void vboxMpCrShgsmiBufCacheFree(PVBOXMP_CRSHGSMITRANSPORT pCon, PVBOXMP_CRSHGSMICON_BUFDR_CACHE pCache, PVBOXMP_CRSHGSMICON_BUFDR pDr) { if (ASMAtomicCmpXchgPtr(&pCache->pBufDr, pDr, NULL)) return; /* the value is already cached, free the current one */ VBoxMpCrShgsmiTransportBufFree(pCon, pDr->pvBuf); vboxWddmMemFree(pDr); }
static DECLCALLBACK(void) pdmacR3TimerCallback(PVM pVM, PTMTIMER pTimer, void *pvUser) { uint64_t tsCur = RTTimeProgramMilliTS(); uint64_t cMilliesNext = UINT64_MAX; PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pvUser; ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, UINT64_MAX); /* Go through all endpoints and check for expired requests. */ PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpClassFile->Core.pEndpointsHead; while (pEpFile) { /* Check for an expired delay. */ if (pEpFile->pDelayedHead != NULL) { PPDMASYNCCOMPLETIONTASKFILE pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pDelayedHead, NULL, PPDMASYNCCOMPLETIONTASKFILE); while (pTaskFile) { PPDMASYNCCOMPLETIONTASKFILE pTmp = pTaskFile; pTaskFile = pTaskFile->pDelayedNext; if (tsCur >= pTmp->tsDelayEnd) { LogRel(("AIOMgr: Delayed request %#p completed\n", pTmp)); pdmR3AsyncCompletionCompleteTask(&pTmp->Core, pTmp->rc, true); } else { /* Prepend to the delayed list again. */ PPDMASYNCCOMPLETIONTASKFILE pHead = NULL; if (pTmp->tsDelayEnd - tsCur < cMilliesNext) cMilliesNext = pTmp->tsDelayEnd - tsCur; do { pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE); pTmp->pDelayedNext = pHead; } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTmp, pHead)); } } } pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pEpFile->Core.pNext; } if (cMilliesNext < pEpClassFile->cMilliesNext) { ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, cMilliesNext); TMTimerSetMillies(pEpClassFile->pTimer, cMilliesNext); } }
/** * Joins the list pList with whatever is linked up at *ppHead. */ static void vmr3ReqJoinFreeSub(volatile PRTREQ *ppHead, PRTREQ pList) { for (unsigned cIterations = 0;; cIterations++) { PRTREQ pHead = ASMAtomicXchgPtrT(ppHead, pList, PRTREQ); if (!pHead) return; PRTREQ pTail = pHead; while (pTail->pNext) pTail = pTail->pNext; pTail->pNext = pList; if (ASMAtomicCmpXchgPtr(ppHead, pHead, pList)) return; pTail->pNext = NULL; if (ASMAtomicCmpXchgPtr(ppHead, pHead, NULL)) return; pList = pHead; Assert(cIterations != 32); Assert(cIterations != 64); } }
RTDECL(int) RTTimerCreateEx(PRTTIMER *ppTimer, uint64_t u64NanoInterval, uint32_t fFlags, PFNRTTIMER pfnTimer, void *pvUser) { *ppTimer = NULL; /* * We don't support the fancy MP features. */ if (fFlags & RTTIMER_FLAGS_CPU_SPECIFIC) return VERR_NOT_SUPPORTED; /* * Lazy initialize the spinlock. */ if (g_Spinlock == NIL_RTSPINLOCK) { RTSPINLOCK Spinlock; int rc = RTSpinlockCreate(&Spinlock, RTSPINLOCK_FLAGS_INTERRUPT_SAFE, "RTTimerOS2"); AssertRCReturn(rc, rc); //bool fRc; //ASMAtomicCmpXchgSize(&g_Spinlock, Spinlock, NIL_RTSPINLOCK, fRc); //if (!fRc) if (!ASMAtomicCmpXchgPtr((void * volatile *)&g_Spinlock, Spinlock, NIL_RTSPINLOCK)) RTSpinlockDestroy(Spinlock); } /* * Allocate and initialize the timer handle. */ PRTTIMER pTimer = (PRTTIMER)RTMemAlloc(sizeof(*pTimer)); if (!pTimer) return VERR_NO_MEMORY; pTimer->u32Magic = RTTIMER_MAGIC; pTimer->pNext = NULL; pTimer->fSuspended = true; pTimer->pfnTimer = pfnTimer; pTimer->pvUser = pvUser; pTimer->u64NanoInterval = u64NanoInterval; pTimer->u64StartTS = 0; /* * Insert the timer into the list (LIFO atm). */ RTSpinlockAcquire(g_Spinlock); g_u32ChangeNo++; pTimer->pNext = g_pTimerHead; g_pTimerHead = pTimer; g_cTimers++; RTSpinlockRelease(g_Spinlock); *ppTimer = pTimer; return VINF_SUCCESS; }
int pdmacFileEpAddTask(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask) { PPDMACTASKFILE pNext; do { pNext = pEndpoint->pTasksNewHead; pTask->pNext = pNext; } while (!ASMAtomicCmpXchgPtr(&pEndpoint->pTasksNewHead, pTask, pNext)); pdmacFileAioMgrWakeup(ASMAtomicReadPtrT(&pEndpoint->pAioMgr, PPDMACEPFILEMGR)); return VINF_SUCCESS; }
RTDECL(int) RTReqQueueAlloc(RTREQQUEUE hQueue, RTREQTYPE enmType, PRTREQ *phReq) { /* * Validate input. */ PRTREQQUEUEINT pQueue = hQueue; AssertPtrReturn(pQueue, VERR_INVALID_HANDLE); AssertReturn(pQueue->u32Magic == RTREQQUEUE_MAGIC, VERR_INVALID_HANDLE); AssertMsgReturn(enmType > RTREQTYPE_INVALID && enmType < RTREQTYPE_MAX, ("%d\n", enmType), VERR_RT_REQUEST_INVALID_TYPE); /* * Try get a recycled packet. * * While this could all be solved with a single list with a lock, it's a sport * of mine to avoid locks. */ int cTries = RT_ELEMENTS(pQueue->apReqFree) * 2; while (--cTries >= 0) { PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)]; PRTREQ pReq = ASMAtomicXchgPtrT(ppHead, NULL, PRTREQ); if (pReq) { PRTREQ pNext = pReq->pNext; if ( pNext && !ASMAtomicCmpXchgPtr(ppHead, pNext, NULL)) vmr3ReqJoinFree(pQueue, pReq->pNext); ASMAtomicDecU32(&pQueue->cReqFree); Assert(pReq->uOwner.hQueue == pQueue); Assert(!pReq->fPoolOrQueue); int rc = rtReqReInit(pReq, enmType); if (RT_SUCCESS(rc)) { *phReq = pReq; LogFlow(("RTReqQueueAlloc: returns VINF_SUCCESS *phReq=%p recycled\n", pReq)); return VINF_SUCCESS; } } } /* * Ok, allocate a new one. */ int rc = rtReqAlloc(enmType, false /*fPoolOrQueue*/, pQueue, phReq); LogFlow(("RTReqQueueAlloc: returns %Rrc *phReq=%p\n", rc, *phReq)); return rc; }
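The free-list handling here has two halves: rtReqQueueRecycle (further down) pushes a request onto one of several LIFO slots picked round-robin, while this allocator pops by exchanging a whole slot out, so no ABA-prone single-node pop is needed, and hands any surplus back via vmr3ReqJoinFree. A reduced sketch of the pop half with C11 atomics; Node, g_apHead, g_iNext and slotPop are made-up names, and the remainder is simply prepended back instead of going through a separate join routine:

#include <stdatomic.h>
#include <stddef.h>

typedef struct Node { struct Node *pNext; } Node;

#define NUM_SLOTS 4
static _Atomic(Node *) g_apHead[NUM_SLOTS];   /* mirrors pQueue->apReqFree */
static atomic_uint     g_iNext;               /* mirrors pQueue->iReqFree  */

/* Pop one node from a round-robin slot, giving any remainder back to the slot. */
static Node *slotPop(void)
{
    for (int cTries = NUM_SLOTS * 2; cTries > 0; cTries--)
    {
        _Atomic(Node *) *pSlot = &g_apHead[atomic_fetch_add(&g_iNext, 1) % NUM_SLOTS];
        Node *pHead = atomic_exchange(pSlot, NULL);   /* grab the whole list */
        if (!pHead)
            continue;

        Node *pRest = pHead->pNext;
        if (pRest)
        {
            /* Find the tail of the remainder and prepend the whole chain back
               onto whatever is in the slot now (same idea as vmr3ReqJoinFreeSub). */
            Node *pTail = pRest;
            while (pTail->pNext)
                pTail = pTail->pNext;
            Node *pCur = atomic_load(pSlot);
            do
                pTail->pNext = pCur;
            while (!atomic_compare_exchange_weak(pSlot, &pCur, pRest));
        }
        pHead->pNext = NULL;
        return pHead;
    }
    return NULL;                                      /* nothing recycled; caller allocates */
}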
/** * Submits a request to the queue. * * @param pQueue The queue. * @param pReq The request. */ DECLHIDDEN(void) rtReqQueueSubmit(PRTREQQUEUEINT pQueue, PRTREQINT pReq) { PRTREQ pNext; do { pNext = pQueue->pReqs; pReq->pNext = pNext; ASMAtomicWriteBool(&pQueue->fBusy, true); } while (!ASMAtomicCmpXchgPtr(&pQueue->pReqs, pReq, pNext)); /* * Notify queue thread. */ RTSemEventSignal(pQueue->EventSem); }
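This retry loop is the producer half of the lock-free LIFO used throughout these snippets (rtReqQueueSubmit, PDMQueueInsert, pdmacFileEpAddTask): read the current head, point the new element at it, and compare-exchange until no other thread has moved the head in between. A stand-alone sketch with C11 atomics; Node and listPush are illustrative names:

#include <stdatomic.h>

typedef struct Node { struct Node *pNext; } Node;

static _Atomic(Node *) g_pHead;          /* the shared LIFO head, e.g. pQueue->pReqs */

/* Prepend pNode; safe against concurrent pushers and a consumer that
   atomically exchanges the whole list out (as the queue threads do). */
static void listPush(Node *pNode)
{
    Node *pHead = atomic_load(&g_pHead);
    do
        pNode->pNext = pHead;            /* link before publishing */
    while (!atomic_compare_exchange_weak(&g_pHead, &pHead, pNode));
}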
/** * Queue an item. * The item must have been obtained using PDMQueueAlloc(). Once the item * has been passed to this function it must not be touched! * * @param pQueue The queue handle. * @param pItem The item to insert. * @thread Any thread. */ VMMDECL(void) PDMQueueInsert(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem) { Assert(VALID_PTR(pQueue) && pQueue->CTX_SUFF(pVM)); Assert(VALID_PTR(pItem)); #if 0 /* the paranoid android version: */ void *pvNext; do { pvNext = ASMAtomicUoReadPtr((void * volatile *)&pQueue->CTX_SUFF(pPending)); ASMAtomicUoWritePtr((void * volatile *)&pItem->CTX_SUFF(pNext), pvNext); } while (!ASMAtomicCmpXchgPtr(&pQueue->CTX_SUFF(pPending), pItem, pvNext)); #else PPDMQUEUEITEMCORE pNext; do { pNext = pQueue->CTX_SUFF(pPending); pItem->CTX_SUFF(pNext) = pNext; } while (!ASMAtomicCmpXchgPtr(&pQueue->CTX_SUFF(pPending), pItem, pNext)); #endif if (!pQueue->pTimer) { PVM pVM = pQueue->CTX_SUFF(pVM); Log2(("PDMQueueInsert: VM_FF_PDM_QUEUES %d -> 1\n", VM_FF_ISSET(pVM, VM_FF_PDM_QUEUES))); VM_FF_SET(pVM, VM_FF_PDM_QUEUES); ASMAtomicBitSet(&pVM->pdm.s.fQueueFlushing, PDM_QUEUE_FLUSH_FLAG_PENDING_BIT); #ifdef IN_RING3 # ifdef VBOX_WITH_REM REMR3NotifyQueuePending(pVM); /** @todo r=bird: we can remove REMR3NotifyQueuePending and let VMR3NotifyFF do the work. */ # endif VMR3NotifyGlobalFFU(pVM->pUVM, VMNOTIFYFF_FLAGS_DONE_REM); #endif } STAM_REL_COUNTER_INC(&pQueue->StatInsert); STAM_STATS({ ASMAtomicIncU32(&pQueue->cStatPending); });
/* virtual */ RWLockHandle *VirtualBoxBase::lockHandle() const { /* lazy initialization */ if (RT_LIKELY(mObjectLock)) return mObjectLock; AssertCompile(sizeof(RWLockHandle *) == sizeof(void *)); // getLockingClass() is overridden by many subclasses to return // one of the locking classes listed at the top of AutoLock.h RWLockHandle *objLock = new RWLockHandle(getLockingClass()); if (!ASMAtomicCmpXchgPtr(&mObjectLock, objLock, NULL)) { delete objLock; objLock = ASMAtomicReadPtrT(&mObjectLock, RWLockHandle *); }
/** * Recycles a request. * * @returns true if recycled, false if it should be freed. * @param pQueue The queue. * @param pReq The request. */ DECLHIDDEN(bool) rtReqQueueRecycle(PRTREQQUEUEINT pQueue, PRTREQINT pReq) { if ( !pQueue || pQueue->cReqFree >= 128) return false; ASMAtomicIncU32(&pQueue->cReqFree); PRTREQ volatile *ppHead = &pQueue->apReqFree[ASMAtomicIncU32(&pQueue->iReqFree) % RT_ELEMENTS(pQueue->apReqFree)]; PRTREQ pNext; do { pNext = *ppHead; ASMAtomicWritePtr(&pReq->pNext, pNext); } while (!ASMAtomicCmpXchgPtr(ppHead, pReq, pNext)); return true; }
VBoxDbgBase::atStateChange(PVM pVM, VMSTATE enmState, VMSTATE /*enmOldState*/, void *pvUser) { VBoxDbgBase *pThis = (VBoxDbgBase *)pvUser; switch (enmState) { case VMSTATE_TERMINATED: /** @todo need to do some locking here? */ if (ASMAtomicCmpXchgPtr(&pThis->m_pVM, NULL, pVM)) pThis->sigTerminated(); break; case VMSTATE_DESTROYING: pThis->sigDestroying(); break; default: break; } }
/** * File close handler * */ static int vgdrvFreeBSDClose(struct cdev *pDev, int fFile, int DevType, struct thread *pTd) { PVBOXGUESTSESSION pSession = (PVBOXGUESTSESSION)pDev->si_drv1; Log(("vgdrvFreeBSDClose: fFile=%#x pSession=%p\n", fFile, pSession)); /* * Close the session if it's still hanging on to the device... */ if (VALID_PTR(pSession)) { VGDrvCommonCloseSession(&g_DevExt, pSession); if (!ASMAtomicCmpXchgPtr(&pDev->si_drv1, NULL, pSession)) Log(("vgdrvFreeBSDClose: si_drv1=%p expected %p!\n", pDev->si_drv1, pSession)); ASMAtomicDecU32(&cUsers); /* Don't use destroy_dev here because it may sleep resulting in a hanging user process. */ destroy_dev_sched(pDev); } else Log(("vgdrvFreeBSDClose: si_drv1=%p!\n", pSession)); return 0; }
DECLCALLBACK(int) VBoxMPHGSMIHostCmdRequestCB(HVBOXVIDEOHGSMI hHGSMI, uint8_t u8Channel, uint32_t iDisplay, struct VBVAHOSTCMD **ppCmd) { LOGF_ENTER(); if (!ppCmd) { LOGF_LEAVE(); return VERR_INVALID_PARAMETER; } PHGSMIHOSTCOMMANDCONTEXT pCtx = &((PVBOXMP_COMMON)hHGSMI)->hostCtx; /* pick up the host commands */ VBoxHGSMIProcessHostQueue(pCtx); HGSMICHANNEL *pChannel = HGSMIChannelFindById(&pCtx->channels, u8Channel); if(pChannel) { VBVA_CHANNELCONTEXTS * pContexts = (VBVA_CHANNELCONTEXTS *)pChannel->handler.pvHandler; VBVADISP_CHANNELCONTEXT *pDispContext = VBoxVbvaFindHandlerInfo(pContexts, iDisplay); if(pDispContext) { VBVAHOSTCMD *pCmd; do { pCmd = ASMAtomicReadPtrT(&pDispContext->pCmd, VBVAHOSTCMD *); } while (!ASMAtomicCmpXchgPtr(&pDispContext->pCmd, NULL, pCmd)); *ppCmd = VBoxVbvaReverseList(pCmd); LOGF_LEAVE(); return VINF_SUCCESS; } else { WARN(("!pDispContext for display %d", iDisplay)); } }
void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc) { PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser; LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc)); if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH) { pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true); } else { Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0); uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg); /* The first error will be returned. */ if (RT_FAILURE(rc)) ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS); #ifdef VBOX_WITH_DEBUGGER else { PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint; /* Overwrite with injected error code. */ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ) rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS); else rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS); if (RT_FAILURE(rc)) ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS); } #endif if (!(uOld - pTask->DataSeg.cbSeg) && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true)) { #ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint; /* Check if we should delay completion of the request. */ if ( ASMAtomicReadU32(&pEpFile->msDelay) > 0 && ASMAtomicCmpXchgPtr(&pEpFile->pReqDelayed, pTaskFile, NULL)) { /* Arm the delay. */ pEpFile->tsDelayEnd = RTTimeProgramMilliTS() + pEpFile->msDelay; LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, pEpFile->msDelay)); return; } #endif pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true); #if PDM_ASYNC_COMPLETION_FILE_WITH_DELAY /* Check for an expired delay. */ if ( pEpFile->pReqDelayed != NULL && RTTimeProgramMilliTS() >= pEpFile->tsDelayEnd) { pTaskFile = ASMAtomicXchgPtrT(&pEpFile->pReqDelayed, NULL, PPDMASYNCCOMPLETIONTASKFILE); ASMAtomicXchgU32(&pEpFile->msDelay, 0); LogRel(("AIOMgr: Delayed request %#p completed\n", pTaskFile)); pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true); } #endif } } }
/** * Opens the IDC interface of the support driver. * * This will perform basic version negotiations and fail if the * minimum requirements aren't met. * * @returns VBox status code. * @param pHandle The handle structure (output). * @param uReqVersion The requested version. Pass 0 for default. * @param uMinVersion The minimum required version. Pass 0 for default. * @param puSessionVersion Where to store the session version. Optional. * @param puDriverVersion Where to store the driver version. Optional. * @param puDriverRevision Where to store the SVN revision of the driver. Optional. */ SUPR0DECL(int) SUPR0IdcOpen(PSUPDRVIDCHANDLE pHandle, uint32_t uReqVersion, uint32_t uMinVersion, uint32_t *puSessionVersion, uint32_t *puDriverVersion, uint32_t *puDriverRevision) { unsigned uDefaultMinVersion; SUPDRVIDCREQCONNECT Req; int rc; /* * Validate and set failure return values. */ AssertPtrReturn(pHandle, VERR_INVALID_POINTER); pHandle->s.pSession = NULL; AssertPtrNullReturn(puSessionVersion, VERR_INVALID_POINTER); if (puSessionVersion) *puSessionVersion = 0; AssertPtrNullReturn(puDriverVersion, VERR_INVALID_POINTER); if (puDriverVersion) *puDriverVersion = 0; AssertPtrNullReturn(puDriverRevision, VERR_INVALID_POINTER); if (puDriverRevision) *puDriverRevision = 0; AssertReturn(!uMinVersion || (uMinVersion & UINT32_C(0xffff0000)) == (SUPDRV_IDC_VERSION & UINT32_C(0xffff0000)), VERR_INVALID_PARAMETER); AssertReturn(!uReqVersion || (uReqVersion & UINT32_C(0xffff0000)) == (SUPDRV_IDC_VERSION & UINT32_C(0xffff0000)), VERR_INVALID_PARAMETER); /* * Handle default version input and enforce minimum requirements made * by this library. * * The clients will pass defaults (0), and only in the case that some * special API feature was just added will they set an actual version. * So, this is the place where we can easily enforce a minimum IDC version * on bugs and similar. It corresponds a bit to what SUPR3Init is * responsible for. */ uDefaultMinVersion = SUPDRV_IDC_VERSION & UINT32_C(0xffff0000); if (!uMinVersion || uMinVersion < uDefaultMinVersion) uMinVersion = uDefaultMinVersion; if (!uReqVersion || uReqVersion < uDefaultMinVersion) uReqVersion = uDefaultMinVersion; /* * Setup the connect request packet and call the OS specific function. */ Req.Hdr.cb = sizeof(Req); Req.Hdr.rc = VERR_WRONG_ORDER; Req.Hdr.pSession = NULL; Req.u.In.u32MagicCookie = SUPDRVIDCREQ_CONNECT_MAGIC_COOKIE; Req.u.In.uMinVersion = uMinVersion; Req.u.In.uReqVersion = uReqVersion; rc = supR0IdcNativeOpen(pHandle, &Req); if (RT_SUCCESS(rc)) { pHandle->s.pSession = Req.u.Out.pSession; if (puSessionVersion) *puSessionVersion = Req.u.Out.uSessionVersion; if (puDriverVersion) *puDriverVersion = Req.u.Out.uDriverVersion; if (puDriverRevision) *puDriverRevision = Req.u.Out.uDriverRevision; /* * We don't really trust anyone, make sure the returned * session and version values actually make sense. */ if ( VALID_PTR(Req.u.Out.pSession) && Req.u.Out.uSessionVersion >= uMinVersion && (Req.u.Out.uSessionVersion & UINT32_C(0xffff0000)) == (SUPDRV_IDC_VERSION & UINT32_C(0xffff0000))) { ASMAtomicCmpXchgPtr(&g_pMainHandle, pHandle, NULL); return rc; } AssertMsgFailed(("pSession=%p uSessionVersion=0x%x (r%u)\n", Req.u.Out.pSession, Req.u.Out.uSessionVersion, Req.u.Out.uDriverRevision)); rc = VERR_VERSION_MISMATCH; SUPR0IdcClose(pHandle); } return rc; }
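The handshake above relies on the convention that the high 16 bits of an IDC version number carry the compatibility-breaking (major) part and the low 16 bits the minor part, so compatibility is a mask comparison plus a minimum check on the full value. A tiny illustration of that check; MY_IDC_VERSION is a made-up constant, not the real SUPDRV_IDC_VERSION:

#include <stdbool.h>
#include <stdint.h>

#define MY_IDC_VERSION UINT32_C(0x00070002)   /* made-up: major 7, minor 2 */

/* True if a session reporting uSessionVersion can serve a client built
   against MY_IDC_VERSION and requiring at least uMinVersion. */
static bool idcVersionOk(uint32_t uSessionVersion, uint32_t uMinVersion)
{
    return (uSessionVersion & UINT32_C(0xffff0000)) == (MY_IDC_VERSION & UINT32_C(0xffff0000)) /* same major */
        && uSessionVersion >= uMinVersion;                                                     /* new enough minor */
}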
void UIFrameBufferQuartz2D::paintEvent(QPaintEvent *aEvent) { /* If the machine is NOT in 'running' state, * the link between framebuffer and video memory * is broken, we should go fallback now... */ if (m_fUsesGuestVRAM && !m_pMachineView->uisession()->isRunning() && !m_pMachineView->uisession()->isPaused() && /* Online snapshotting: */ m_pMachineView->uisession()->machineState() != KMachineState_Saving) { /* Simulate fallback through fake resize-event: */ UIResizeEvent event(FramebufferPixelFormat_Opaque, NULL, 0, 0, 640, 480); resizeEvent(&event); } /* For debugging /Developer/Applications/Performance Tools/Quartz * Debug.app is a nice tool to see which parts of the screen are * updated.*/ Assert(m_image); QWidget* viewport = m_pMachineView->viewport(); Assert(VALID_PTR(viewport)); /* Get the dimensions of the viewport */ CGRect viewRect = ::darwinToCGRect(viewport->geometry()); /* Get the context of this window from Qt */ CGContextRef ctx = ::darwinToCGContextRef(viewport); Assert(VALID_PTR(ctx)); /* Flip the context */ CGContextTranslateCTM(ctx, 0, viewRect.size.height); CGContextScaleCTM(ctx, 1.0, -1.0); /* We handle the seamless mode as a special case. */ if (m_pMachineLogic->visualStateType() == UIVisualStateType_Seamless) { /* Clear the background (make the rect fully transparent): */ CGContextClearRect(ctx, viewRect); #ifdef OVERLAY_CLIPRECTS /* Enable overlay above the seamless mask: */ CGContextSetRGBFillColor(ctx, 0.0, 0.0, 5.0, 0.7); CGContextFillRect(ctx, viewRect); #endif /* OVERLAY_CLIPRECTS */ #ifdef COMP_WITH_SHADOW /* Enable shadows: */ CGContextSetShadow(ctx, CGSizeMake (10, -10), 10); CGContextBeginTransparencyLayer(ctx, NULL); #endif /* COMP_WITH_SHADOW */ /* Determine current visible region: */ RegionRects *pRgnRcts = ASMAtomicXchgPtrT(&mRegion, NULL, RegionRects*); if (pRgnRcts) { /* If visible region is determined: */ if (pRgnRcts->used > 0) { /* Add the clipping rects all at once (they are defined in SetVisibleRegion): */ CGContextBeginPath(ctx); CGContextAddRects(ctx, pRgnRcts->rcts, pRgnRcts->used); /* Now convert the path to a clipping path: */ CGContextClip(ctx); } /* Put back the visible region, free if we cannot (2+ SetVisibleRegion calls): */ if ( !ASMAtomicCmpXchgPtr(&mRegion, pRgnRcts, NULL) && !ASMAtomicCmpXchgPtr(&mRegionUnused, pRgnRcts, NULL)) { RTMemFree(pRgnRcts); pRgnRcts = NULL; } } /* If visible region is still determined: */ if (pRgnRcts && pRgnRcts->used > 0) { /* Create a subimage of the current view. * Currently this subimage is the whole screen. */ CGImageRef subImage; if (!m_pMachineView->pauseShot().isNull()) { CGImageRef pauseImg = ::darwinToCGImageRef(&m_pMachineView->pauseShot()); subImage = CGImageCreateWithImageInRect(pauseImg, CGRectMake(m_pMachineView->contentsX(), m_pMachineView->contentsY(), m_pMachineView->visibleWidth(), m_pMachineView->visibleHeight())); CGImageRelease(pauseImg); } else { #ifdef RT_ARCH_AMD64 /* Not sure who to blame, but it seems on 64bit there goes * something terrible wrong (on a second monitor) when directly * using CGImageCreateWithImageInRect without making a copy. We saw * something like this already with the scale mode. 
*/ CGImageRef tmpImage = CGImageCreateWithImageInRect(m_image, CGRectMake(m_pMachineView->contentsX(), m_pMachineView->contentsY(), m_pMachineView->visibleWidth(), m_pMachineView->visibleHeight())); subImage = CGImageCreateCopy(tmpImage); CGImageRelease(tmpImage); #else /* RT_ARCH_AMD64 */ subImage = CGImageCreateWithImageInRect(m_image, CGRectMake(m_pMachineView->contentsX(), m_pMachineView->contentsY(), m_pMachineView->visibleWidth(), m_pMachineView->visibleHeight())); #endif /* !RT_ARCH_AMD64 */ } Assert(VALID_PTR(subImage)); /* In any case clip the drawing to the view window: */ CGContextClipToRect(ctx, viewRect); /* At this point draw the real vm image: */ CGContextDrawImage(ctx, ::darwinFlipCGRect(viewRect, viewRect.size.height), subImage); /* Release the subimage: */ CGImageRelease(subImage); } #ifdef COMP_WITH_SHADOW CGContextEndTransparencyLayer(ctx); #endif /* COMP_WITH_SHADOW */ #ifdef OVERLAY_CLIPRECTS if (pRgnRcts && pRgnRcts->used > 0) { CGContextBeginPath(ctx); CGContextAddRects(ctx, pRgnRcts->rcts, pRgnRcts->used); CGContextSetRGBStrokeColor(ctx, 1.0, 0.0, 0.0, 0.7); CGContextDrawPath(ctx, kCGPathStroke); } CGContextSetRGBStrokeColor(ctx, 0.0, 1.0, 0.0, 0.7); CGContextStrokeRect(ctx, viewRect); #endif /* OVERLAY_CLIPRECTS */ }
STDMETHODIMP UIFrameBufferQuartz2D::SetVisibleRegion(BYTE *aRectangles, ULONG aCount) { PRTRECT rects = (PRTRECT)aRectangles; if (!rects) return E_POINTER; /** @todo r=bird: Is this thread safe? If I remember the code flow correctly, the * GUI thread could be happily jogging along paintEvent now on another cpu core. * This function is called on the EMT (emulation thread). Which means, blocking * execution waiting for a lock is out of the question. A quick solution using * ASMAtomic(Cmp)XchgPtr and a struct { cAllocated; cRects; aRects[1]; } * *mRegion, *mUnusedRegion; should suffice (and permit you to reuse allocations). */ RegionRects *rgnRcts = ASMAtomicXchgPtrT(&mRegionUnused, NULL, RegionRects *); if (rgnRcts && rgnRcts->allocated < aCount) { RTMemFree (rgnRcts); rgnRcts = NULL; } if (!rgnRcts) { ULONG allocated = RT_ALIGN_32(aCount + 1, 32); allocated = RT_MAX (128, allocated); rgnRcts = (RegionRects *)RTMemAlloc(RT_OFFSETOF(RegionRects, rcts[allocated])); if (!rgnRcts) return E_OUTOFMEMORY; rgnRcts->allocated = allocated; } rgnRcts->used = 0; QRegion reg; // printf ("Region rects follow...\n"); QRect vmScreenRect (0, 0, width(), height()); for (ULONG ind = 0; ind < aCount; ++ ind) { QRect rect; rect.setLeft(rects->xLeft); rect.setTop(rects->yTop); /* QRect are inclusive */ rect.setRight(rects->xRight - 1); rect.setBottom(rects->yBottom - 1); /* The rect should intersect with the vm screen. */ rect = vmScreenRect.intersect(rect); ++ rects; /* Make sure only valid rects are distributed */ /* todo: Test if the other framebuffer implementation have the same * problem with invalid rects (In Linux/Windows) */ if (rect.isValid() && rect.width() > 0 && rect.height() > 0) reg += rect; else continue; CGRect *cgRct = &rgnRcts->rcts[rgnRcts->used]; cgRct->origin.x = rect.x(); cgRct->origin.y = height() - rect.y() - rect.height(); cgRct->size.width = rect.width(); cgRct->size.height = rect.height(); // printf ("Region rect[%d - %d]: %d %d %d %d\n", rgnRcts->used, aCount, rect.x(), rect.y(), rect.height(), rect.width()); rgnRcts->used++; } // printf ("..................................\n"); RegionRects *pOld = ASMAtomicXchgPtrT(&mRegion, rgnRcts, RegionRects *); if ( pOld && !ASMAtomicCmpXchgPtr(&mRegionUnused, pOld, NULL)) RTMemFree(pOld); QApplication::postEvent(m_pMachineView, new UISetRegionEvent (reg)); return S_OK; }
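SetVisibleRegion and paintEvent cooperate through two atomic pointer slots instead of a lock, much as the r=bird todo suggests: the EMT swaps the freshly built rectangle list into mRegion and parks the displaced list in mRegionUnused for reuse, while the painter swaps mRegion out, draws with it, and tries to hand it back to either slot, freeing it only when both are already occupied. A compact sketch of that handoff with C11 atomics; Region, g_pActive, g_pSpare and the helper names are illustrative:

#include <stdatomic.h>
#include <stdlib.h>

typedef struct Region { unsigned cRects; /* rectangle payload omitted */ } Region;

static _Atomic(Region *) g_pActive;   /* mirrors mRegion       */
static _Atomic(Region *) g_pSpare;    /* mirrors mRegionUnused */

/* Producer (EMT): publish a new region, recycle or free the old one. */
static void regionPublish(Region *pNew)
{
    Region *pOld = atomic_exchange(&g_pActive, pNew);
    Region *pExpected = NULL;
    if (pOld && !atomic_compare_exchange_strong(&g_pSpare, &pExpected, pOld))
        free(pOld);                   /* spare slot already taken, drop it */
}

/* Consumer (paint): borrow the region, use it, then try to give it back. */
static void regionPaint(void)
{
    Region *pRgn = atomic_exchange(&g_pActive, NULL);
    if (!pRgn)
        return;

    /* ... clip and draw using pRgn ... */

    Region *pExpected = NULL;
    if (!atomic_compare_exchange_strong(&g_pActive, &pExpected, pRgn))
    {
        pExpected = NULL;
        if (!atomic_compare_exchange_strong(&g_pSpare, &pExpected, pRgn))
            free(pRgn);               /* both slots refilled meanwhile, drop it */
    }
}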
void pdmacFileEpTaskCompleted(PPDMACTASKFILE pTask, void *pvUser, int rc) { PPDMASYNCCOMPLETIONTASKFILE pTaskFile = (PPDMASYNCCOMPLETIONTASKFILE)pvUser; LogFlowFunc(("pTask=%#p pvUser=%#p rc=%Rrc\n", pTask, pvUser, rc)); if (pTask->enmTransferType == PDMACTASKFILETRANSFER_FLUSH) pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, rc, true); else { Assert((uint32_t)pTask->DataSeg.cbSeg == pTask->DataSeg.cbSeg && (int32_t)pTask->DataSeg.cbSeg >= 0); uint32_t uOld = ASMAtomicSubS32(&pTaskFile->cbTransferLeft, (int32_t)pTask->DataSeg.cbSeg); /* The first error will be returned. */ if (RT_FAILURE(rc)) ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS); #ifdef VBOX_WITH_DEBUGGER else { PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint; /* Overwrite with injected error code. */ if (pTask->enmTransferType == PDMACTASKFILETRANSFER_READ) rc = ASMAtomicXchgS32(&pEpFile->rcReqRead, VINF_SUCCESS); else rc = ASMAtomicXchgS32(&pEpFile->rcReqWrite, VINF_SUCCESS); if (RT_FAILURE(rc)) ASMAtomicCmpXchgS32(&pTaskFile->rc, rc, VINF_SUCCESS); } #endif if (!(uOld - pTask->DataSeg.cbSeg) && !ASMAtomicXchgBool(&pTaskFile->fCompleted, true)) { #ifdef PDM_ASYNC_COMPLETION_FILE_WITH_DELAY PPDMASYNCCOMPLETIONENDPOINTFILE pEpFile = (PPDMASYNCCOMPLETIONENDPOINTFILE)pTaskFile->Core.pEndpoint; PPDMASYNCCOMPLETIONEPCLASSFILE pEpClassFile = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEpFile->Core.pEpClass; /* Check if we should delay completion of the request. */ if ( ASMAtomicReadU32(&pEpFile->msDelay) > 0 && ASMAtomicReadU32(&pEpFile->cReqsDelay) > 0) { uint64_t tsDelay = pEpFile->msDelay; if (pEpFile->msJitter) tsDelay = (RTRandU32() % 100) > 50 ? pEpFile->msDelay + (RTRandU32() % pEpFile->msJitter) : pEpFile->msDelay - (RTRandU32() % pEpFile->msJitter); ASMAtomicDecU32(&pEpFile->cReqsDelay); /* Arm the delay. */ pTaskFile->tsDelayEnd = RTTimeProgramMilliTS() + tsDelay; /* Append to the list. */ PPDMASYNCCOMPLETIONTASKFILE pHead = NULL; do { pHead = ASMAtomicReadPtrT(&pEpFile->pDelayedHead, PPDMASYNCCOMPLETIONTASKFILE); pTaskFile->pDelayedNext = pHead; } while (!ASMAtomicCmpXchgPtr(&pEpFile->pDelayedHead, pTaskFile, pHead)); if (tsDelay < pEpClassFile->cMilliesNext) { ASMAtomicWriteU64(&pEpClassFile->cMilliesNext, tsDelay); TMTimerSetMillies(pEpClassFile->pTimer, tsDelay); } LogRel(("AIOMgr: Delaying request %#p for %u ms\n", pTaskFile, tsDelay)); } else #endif pdmR3AsyncCompletionCompleteTask(&pTaskFile->Core, pTaskFile->rc, true); } } }
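Besides the delay machinery, the completion path shows a common idiom for aggregating many sub-transfers: the outstanding byte count is decremented atomically and only the first failure is recorded, by compare-exchanging the status from VINF_SUCCESS to the error exactly once. A small stand-alone version of that idiom with C11 atomics; the names are made up and 0 stands in for VINF_SUCCESS:

#include <stdatomic.h>
#include <stdbool.h>
#include <stdint.h>

typedef struct AggTask
{
    atomic_uint_least32_t cbLeft;    /* bytes still outstanding            */
    atomic_int            rc;        /* 0 = success, first error otherwise */
} AggTask;

/* Account one completed sub-transfer; returns true when the whole task is done. */
static bool aggTaskSubCompleted(AggTask *pTask, uint32_t cbDone, int rcSub)
{
    if (rcSub != 0)
    {
        int rcExpected = 0;          /* only the first error wins */
        atomic_compare_exchange_strong(&pTask->rc, &rcExpected, rcSub);
    }
    /* fetch_sub returns the old value, so the task is done when it equals cbDone. */
    return atomic_fetch_sub(&pTask->cbLeft, cbDone) == cbDone;
}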
RTDECL(int) RTFileAioCtxSubmit(RTFILEAIOCTX hAioCtx, PRTFILEAIOREQ pahReqs, size_t cReqs) { int rc = VINF_SUCCESS; PRTFILEAIOCTXINTERNAL pCtxInt = hAioCtx; /* Parameter checks */ AssertPtrReturn(pCtxInt, VERR_INVALID_HANDLE); AssertReturn(cReqs != 0, VERR_INVALID_POINTER); AssertPtrReturn(pahReqs, VERR_INVALID_PARAMETER); rtFileAioCtxDump(pCtxInt); /* Check that we don't exceed the limit */ if (ASMAtomicUoReadS32(&pCtxInt->cRequests) + cReqs > pCtxInt->cMaxRequests) return VERR_FILE_AIO_LIMIT_EXCEEDED; PRTFILEAIOREQINTERNAL pHead = NULL; do { int rcPosix = 0; size_t cReqsSubmit = 0; size_t i = 0; PRTFILEAIOREQINTERNAL pReqInt; while ( (i < cReqs) && (i < AIO_LISTIO_MAX)) { pReqInt = pahReqs[i]; if (RTFILEAIOREQ_IS_NOT_VALID(pReqInt)) { /* Undo everything and stop submitting. */ for (size_t iUndo = 0; iUndo < i; iUndo++) { pReqInt = pahReqs[iUndo]; RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED); pReqInt->pCtxInt = NULL; /* Unlink from the list again. */ PRTFILEAIOREQINTERNAL pNext, pPrev; pNext = pReqInt->pNext; pPrev = pReqInt->pPrev; if (pNext) pNext->pPrev = pPrev; if (pPrev) pPrev->pNext = pNext; else pHead = pNext; } rc = VERR_INVALID_HANDLE; break; } pReqInt->pCtxInt = pCtxInt; if (pReqInt->fFlush) break; /* Link them together. */ pReqInt->pNext = pHead; if (pHead) pHead->pPrev = pReqInt; pReqInt->pPrev = NULL; pHead = pReqInt; RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED); cReqsSubmit++; i++; } if (cReqsSubmit) { rcPosix = lio_listio(LIO_NOWAIT, (struct aiocb **)pahReqs, cReqsSubmit, NULL); if (RT_UNLIKELY(rcPosix < 0)) { size_t cReqsSubmitted = cReqsSubmit; if (errno == EAGAIN) rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES; else rc = RTErrConvertFromErrno(errno); /* Check which ones were not submitted. */ for (i = 0; i < cReqsSubmit; i++) { pReqInt = pahReqs[i]; rcPosix = aio_error(&pReqInt->AioCB); if ((rcPosix != EINPROGRESS) && (rcPosix != 0)) { cReqsSubmitted--; #if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD) if (errno == EINVAL) #else if (rcPosix == EINVAL) #endif { /* Was not submitted. */ RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED); } else { /* An error occurred. */ RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED); /* * Looks like Apple and glibc interpret the standard in different ways. * glibc returns the error code which would be in errno but Apple returns * -1 and sets errno to the appropriate value */ #if defined(RT_OS_DARWIN) || defined(RT_OS_FREEBSD) Assert(rcPosix == -1); pReqInt->Rc = RTErrConvertFromErrno(errno); #elif defined(RT_OS_LINUX) pReqInt->Rc = RTErrConvertFromErrno(rcPosix); #endif pReqInt->cbTransfered = 0; } /* Unlink from the list. */ PRTFILEAIOREQINTERNAL pNext, pPrev; pNext = pReqInt->pNext; pPrev = pReqInt->pPrev; if (pNext) pNext->pPrev = pPrev; if (pPrev) pPrev->pNext = pNext; else pHead = pNext; pReqInt->pNext = NULL; pReqInt->pPrev = NULL; } } ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmitted); AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n")); break; } ASMAtomicAddS32(&pCtxInt->cRequests, cReqsSubmit); AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n")); cReqs -= cReqsSubmit; pahReqs += cReqsSubmit; } /* * Check if we have a flush request now. * If not we hit the AIO_LISTIO_MAX limit * and will continue submitting requests * above. */ if (cReqs && RT_SUCCESS_NP(rc)) { pReqInt = pahReqs[0]; if (pReqInt->fFlush) { /* * lio_listio does not work with flush requests so * we have to use aio_fsync directly. 
*/ rcPosix = aio_fsync(O_SYNC, &pReqInt->AioCB); if (RT_UNLIKELY(rcPosix < 0)) { if (errno == EAGAIN) { rc = VERR_FILE_AIO_INSUFFICIENT_RESSOURCES; RTFILEAIOREQ_SET_STATE(pReqInt, PREPARED); } else { rc = RTErrConvertFromErrno(errno); RTFILEAIOREQ_SET_STATE(pReqInt, COMPLETED); pReqInt->Rc = rc; } pReqInt->cbTransfered = 0; break; } /* Link them together. */ pReqInt->pNext = pHead; if (pHead) pHead->pPrev = pReqInt; pReqInt->pPrev = NULL; pHead = pReqInt; RTFILEAIOREQ_SET_STATE(pReqInt, SUBMITTED); ASMAtomicIncS32(&pCtxInt->cRequests); AssertMsg(pCtxInt->cRequests >= 0, ("Adding requests resulted in overflow\n")); cReqs--; pahReqs++; } } } while ( cReqs && RT_SUCCESS_NP(rc)); if (pHead) { /* * Forward successfully submitted requests to the thread waiting for requests. * We search for a free slot first and if we don't find one * we will grab the first one and append our list to the existing entries. */ unsigned iSlot = 0; while ( (iSlot < RT_ELEMENTS(pCtxInt->apReqsNewHead)) && !ASMAtomicCmpXchgPtr(&pCtxInt->apReqsNewHead[iSlot], pHead, NULL)) iSlot++; if (iSlot == RT_ELEMENTS(pCtxInt->apReqsNewHead)) { /* Nothing found. */ PRTFILEAIOREQINTERNAL pOldHead = ASMAtomicXchgPtrT(&pCtxInt->apReqsNewHead[0], NULL, PRTFILEAIOREQINTERNAL); /* Find the end of the current head and link the old list to the current. */ PRTFILEAIOREQINTERNAL pTail = pHead; while (pTail->pNext) pTail = pTail->pNext; pTail->pNext = pOldHead; ASMAtomicWritePtr(&pCtxInt->apReqsNewHead[0], pHead); } /* Set the internal wakeup flag and wakeup the thread if possible. */ bool fWokenUp = ASMAtomicXchgBool(&pCtxInt->fWokenUpInternal, true); if (!fWokenUp) rtFileAioCtxWakeup(pCtxInt); } rtFileAioCtxDump(pCtxInt); return rc; }
/** * Process pending items in one queue. * * @returns Success indicator. * If false the item the consumer said "enough!". * @param pQueue The queue. */ static bool pdmR3QueueFlush(PPDMQUEUE pQueue) { STAM_PROFILE_START(&pQueue->StatFlushPrf,p); /* * Get the lists. */ PPDMQUEUEITEMCORE pItems = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE); RTRCPTR pItemsRC = ASMAtomicXchgRCPtr(&pQueue->pPendingRC, NIL_RTRCPTR); RTR0PTR pItemsR0 = ASMAtomicXchgR0Ptr(&pQueue->pPendingR0, NIL_RTR0PTR); AssertMsgReturn( pItemsR0 || pItemsRC || pItems, ("Someone is racing us? This shouldn't happen!\n"), true); /* * Reverse the list (it's inserted in LIFO order to avoid semaphores, remember). */ PPDMQUEUEITEMCORE pCur = pItems; pItems = NULL; while (pCur) { PPDMQUEUEITEMCORE pInsert = pCur; pCur = pCur->pNextR3; pInsert->pNextR3 = pItems; pItems = pInsert; } /* * Do the same for any pending RC items. */ while (pItemsRC) { PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperRCToR3(pQueue->pVMR3, pItemsRC); pItemsRC = pInsert->pNextRC; pInsert->pNextRC = NIL_RTRCPTR; pInsert->pNextR3 = pItems; pItems = pInsert; } /* * Do the same for any pending R0 items. */ while (pItemsR0) { PPDMQUEUEITEMCORE pInsert = (PPDMQUEUEITEMCORE)MMHyperR0ToR3(pQueue->pVMR3, pItemsR0); pItemsR0 = pInsert->pNextR0; pInsert->pNextR0 = NIL_RTR0PTR; pInsert->pNextR3 = pItems; pItems = pInsert; } /* * Feed the items to the consumer function. */ Log2(("pdmR3QueueFlush: pQueue=%p enmType=%d pItems=%p\n", pQueue, pQueue->enmType, pItems)); switch (pQueue->enmType) { case PDMQUEUETYPE_DEV: while (pItems) { if (!pQueue->u.Dev.pfnCallback(pQueue->u.Dev.pDevIns, pItems)) break; pCur = pItems; pItems = pItems->pNextR3; pdmR3QueueFreeItem(pQueue, pCur); } break; case PDMQUEUETYPE_DRV: while (pItems) { if (!pQueue->u.Drv.pfnCallback(pQueue->u.Drv.pDrvIns, pItems)) break; pCur = pItems; pItems = pItems->pNextR3; pdmR3QueueFreeItem(pQueue, pCur); } break; case PDMQUEUETYPE_INTERNAL: while (pItems) { if (!pQueue->u.Int.pfnCallback(pQueue->pVMR3, pItems)) break; pCur = pItems; pItems = pItems->pNextR3; pdmR3QueueFreeItem(pQueue, pCur); } break; case PDMQUEUETYPE_EXTERNAL: while (pItems) { if (!pQueue->u.Ext.pfnCallback(pQueue->u.Ext.pvUser, pItems)) break; pCur = pItems; pItems = pItems->pNextR3; pdmR3QueueFreeItem(pQueue, pCur); } break; default: AssertMsgFailed(("Invalid queue type %d\n", pQueue->enmType)); break; } /* * Success? */ if (pItems) { /* * Reverse the list. */ pCur = pItems; pItems = NULL; while (pCur) { PPDMQUEUEITEMCORE pInsert = pCur; pCur = pInsert->pNextR3; pInsert->pNextR3 = pItems; pItems = pInsert; } /* * Insert the list at the tail of the pending list. */ for (;;) { if (ASMAtomicCmpXchgPtr(&pQueue->pPendingR3, pItems, NULL)) break; PPDMQUEUEITEMCORE pPending = ASMAtomicXchgPtrT(&pQueue->pPendingR3, NULL, PPDMQUEUEITEMCORE); if (pPending) { pCur = pPending; while (pCur->pNextR3) pCur = pCur->pNextR3; pCur->pNextR3 = pItems; pItems = pPending; } } STAM_REL_COUNTER_INC(&pQueue->StatFlushLeftovers); STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p); return false; } STAM_PROFILE_STOP(&pQueue->StatFlushPrf,p); return true; }
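The leftovers handling at the end is another lock-free list merge: the unconsumed items are CASed into the pending slot only if it is still empty; otherwise whatever was queued in the meantime is exchanged out, the leftovers are appended behind it (so newer items stay ahead), and the merged list is retried. A reduced sketch of that merge loop; Node and pendingPushBack are illustrative names:

#include <stdatomic.h>
#include <stddef.h>

typedef struct Node { struct Node *pNext; } Node;

static _Atomic(Node *) g_pPending;    /* mirrors pQueue->pPendingR3 */

/* Put a list of unconsumed items back, keeping items queued meanwhile ahead of them. */
static void pendingPushBack(Node *pLeftovers)
{
    for (;;)
    {
        /* Fast path: the pending slot is empty, just install the leftovers. */
        Node *pExpected = NULL;
        if (atomic_compare_exchange_strong(&g_pPending, &pExpected, pLeftovers))
            return;

        /* Somebody queued new items; steal them, append our leftovers and retry. */
        Node *pNew = atomic_exchange(&g_pPending, NULL);
        if (pNew)
        {
            Node *pTail = pNew;
            while (pTail->pNext)
                pTail = pTail->pNext;
            pTail->pNext = pLeftovers;
            pLeftovers = pNew;
        }
    }
}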
STDMETHODIMP UIFrameBufferQuartz2D::SetVisibleRegion(BYTE *pRectangles, ULONG aCount) { LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Rectangle count=%lu\n", (unsigned long)aCount)); /* Make sure rectangles were passed: */ if (!pRectangles) { LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Invalid pRectangles pointer!\n")); return E_POINTER; } /* Lock access to frame-buffer: */ lock(); /* Make sure frame-buffer is used: */ if (m_fIsMarkedAsUnused) { LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Ignored!\n")); /* Unlock access to frame-buffer: */ unlock(); /* Ignore SetVisibleRegion: */ return E_FAIL; } /** @todo r=bird: Is this thread safe? If I remember the code flow correctly, the * GUI thread could be happily jogging along paintEvent now on another cpu core. * This function is called on the EMT (emulation thread). Which means, blocking * execution waiting for a lock is out of the question. A quick solution using * ASMAtomic(Cmp)XchgPtr and a struct { cAllocated; cRects; aRects[1]; } * *mRegion, *mUnusedRegion; should suffice (and permit you to reuse allocations). */ RegionRects *rgnRcts = ASMAtomicXchgPtrT(&mRegionUnused, NULL, RegionRects *); if (rgnRcts && rgnRcts->allocated < aCount) { RTMemFree (rgnRcts); rgnRcts = NULL; } if (!rgnRcts) { ULONG allocated = RT_ALIGN_32(aCount + 1, 32); allocated = RT_MAX (128, allocated); rgnRcts = (RegionRects *)RTMemAlloc(RT_OFFSETOF(RegionRects, rcts[allocated])); if (!rgnRcts) { /* Unlock access to frame-buffer: */ unlock(); return E_OUTOFMEMORY; } rgnRcts->allocated = allocated; } rgnRcts->used = 0; /* Compose region: */ QRegion reg; PRTRECT rects = (PRTRECT)pRectangles; QRect vmScreenRect(0, 0, width(), height()); for (ULONG ind = 0; ind < aCount; ++ ind) { /* Get current rectangle: */ QRect rect; rect.setLeft(rects->xLeft); rect.setTop(rects->yTop); /* Which is inclusive: */ rect.setRight(rects->xRight - 1); rect.setBottom(rects->yBottom - 1); /* The rect should intersect with the vm screen. */ rect = vmScreenRect.intersect(rect); ++rects; /* Make sure only valid rects are distributed: */ if (rect.isValid() && rect.width() > 0 && rect.height() > 0) reg += rect; else continue; /* That is some *magic* added by Knut in r27807: */ CGRect *cgRct = &rgnRcts->rcts[rgnRcts->used]; cgRct->origin.x = rect.x(); cgRct->origin.y = height() - rect.y() - rect.height(); cgRct->size.width = rect.width(); cgRct->size.height = rect.height(); rgnRcts->used++; } RegionRects *pOld = ASMAtomicXchgPtrT(&mRegion, rgnRcts, RegionRects *); if ( pOld && !ASMAtomicCmpXchgPtr(&mRegionUnused, pOld, NULL)) RTMemFree(pOld); /* Send async signal to update asynchronous visible-region: */ LogRel2(("UIFrameBufferQuartz2D::SetVisibleRegion: Sending to async-handler...\n")); emit sigSetVisibleRegion(reg); /* Unlock access to frame-buffer: */ unlock(); /* Confirm SetVisibleRegion: */ return S_OK; }