/**
 * Asynchronous I/O thread for handling receive.
 *
 * @returns VINF_SUCCESS (ignored).
 * @param   pDrvIns     The driver instance.
 * @param   pThread     The PDM thread structure.
 */
static DECLCALLBACK(int) drvNicAsyncIoThread(PPDMDRVINS pDrvIns, PPDMTHREAD pThread)
{
    PDRVNIC pThis = PDMINS_2_DATA(pDrvIns, PDRVNIC);

    LogFlow(("drvNicAsyncIoThread: pThis=%p\n", pThis));

    if (pThread->enmState == PDMTHREADSTATE_INITIALIZING)
        return VINF_SUCCESS;

    Genode::Signal_receiver &sig_rec = pThis->nic_client->sig_rec();

    while (pThread->enmState == PDMTHREADSTATE_RUNNING)
    {
        Genode::Signal sig = sig_rec.wait_for_signal();
        int num = sig.num();

        Genode::Signal_dispatcher_base *dispatcher;
        dispatcher = dynamic_cast<Genode::Signal_dispatcher_base *>(sig.context());
        dispatcher->dispatch(num);
    }

    destruct_lock()->unlock();

    return VINF_SUCCESS;
}
/**
 * Allocates a task segment.
 *
 * @returns Pointer to the new task segment or NULL.
 * @param   pEndpoint   Pointer to the endpoint.
 */
PPDMACTASKFILE pdmacFileTaskAlloc(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint)
{
    PPDMACTASKFILE pTask = NULL;

    /* Try the small per endpoint cache first. */
    if (pEndpoint->pTasksFreeHead == pEndpoint->pTasksFreeTail)
    {
        /* Try the bigger endpoint class cache. */
        PPDMASYNCCOMPLETIONEPCLASSFILE pEndpointClass = (PPDMASYNCCOMPLETIONEPCLASSFILE)pEndpoint->Core.pEpClass;

        /*
         * Allocate completely new.
         * If this fails we return NULL.
         */
        int rc = MMR3HeapAllocZEx(pEndpointClass->Core.pVM, MM_TAG_PDM_ASYNC_COMPLETION,
                                  sizeof(PDMACTASKFILE),
                                  (void **)&pTask);
        if (RT_FAILURE(rc))
            pTask = NULL;

        LogFlow(("Allocated task %p\n", pTask));
    }
    else
    {
        /* Grab a free task from the head. */
        AssertMsg(pEndpoint->cTasksCached > 0, ("No tasks cached but list contains more than one element\n"));

        pTask = pEndpoint->pTasksFreeHead;
        pEndpoint->pTasksFreeHead = pTask->pNext;
        ASMAtomicDecU32(&pEndpoint->cTasksCached);
    }

    if (pTask) /* Guard against a failed allocation above. */
        pTask->pNext = NULL;

    return pTask;
}
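/*
 * A minimal sketch of the matching "free" side of the per-endpoint task cache
 * used by the allocator above, for illustration only. It pushes tasks back
 * onto the head (LIFO variant); pdmacFileTaskFreeSketch and cTasksCachedMax
 * are illustrative names, not the actual PDM API.
 */
static void pdmacFileTaskFreeSketch(PPDMASYNCCOMPLETIONENDPOINTFILE pEndpoint, PPDMACTASKFILE pTask)
{
    uint32_t const cTasksCachedMax = 32; /* illustrative cap on the cache size */

    if (pEndpoint->cTasksCached < cTasksCachedMax)
    {
        /* Push the task back onto the head of the small per-endpoint cache. */
        pTask->pNext = pEndpoint->pTasksFreeHead;
        pEndpoint->pTasksFreeHead = pTask;
        ASMAtomicIncU32(&pEndpoint->cTasksCached);
    }
    else
        MMR3HeapFree(pTask); /* Cache full: hand the memory back to the heap. */
}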
/**
 * Register an access handler for a virtual range.
 *
 * @returns VBox status code.
 * @param   pVM             Pointer to the VM.
 * @param   enmType         Handler type. Any of the PGMVIRTHANDLERTYPE_* enums.
 * @param   GCPtr           Start address.
 * @param   GCPtrLast       Last address (inclusive).
 * @param   pfnInvalidateR3 The R3 invalidate callback (can be 0).
 * @param   pfnHandlerR3    The R3 handler.
 * @param   pszHandlerRC    The RC handler symbol name.
 * @param   pszModRC        The RC handler module.
 * @param   pszDesc         Pointer to description string. This must not be freed.
 */
VMMR3DECL(int) PGMR3HandlerVirtualRegister(PVM pVM, PGMVIRTHANDLERTYPE enmType, RTGCPTR GCPtr, RTGCPTR GCPtrLast,
                                           PFNPGMR3VIRTINVALIDATE pfnInvalidateR3, PFNPGMR3VIRTHANDLER pfnHandlerR3,
                                           const char *pszHandlerRC, const char *pszModRC, const char *pszDesc)
{
    LogFlow(("PGMR3HandlerVirtualRegister: enmType=%d GCPtr=%RGv GCPtrLast=%RGv pszHandlerRC=%p:{%s} pszModRC=%p:{%s} pszDesc=%s\n",
             enmType, GCPtr, GCPtrLast, pszHandlerRC, pszHandlerRC, pszModRC, pszModRC, pszDesc));

    /* Not supported/relevant for VT-x and AMD-V. */
    if (HMIsEnabled(pVM))
        return VERR_NOT_IMPLEMENTED;

    /*
     * Validate input.
     */
    if (!pszModRC)
        pszModRC = VMMGC_MAIN_MODULE_NAME;
    if (!*pszModRC || !pszHandlerRC || !*pszHandlerRC)
    {
        AssertMsgFailed(("pszHandlerRC and/or pszModRC is missing\n"));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Resolve the RC handler.
     */
    RTRCPTR pfnHandlerRC;
    int rc = PDMR3LdrGetSymbolRCLazy(pVM, pszModRC, NULL /*pszSearchPath*/, pszHandlerRC, &pfnHandlerRC);
    if (RT_SUCCESS(rc))
        return PGMR3HandlerVirtualRegisterEx(pVM, enmType, GCPtr, GCPtrLast,
                                             pfnInvalidateR3, pfnHandlerR3, pfnHandlerRC, pszDesc);

    AssertMsgFailed(("Failed to resolve %s.%s, rc=%Rrc.\n", pszModRC, pszHandlerRC, rc));
    return rc;
}
RTDECL(int) RTSymlinkCreate(const char *pszSymlink, const char *pszTarget, RTSYMLINKTYPE enmType, uint32_t fCreate)
{
    /*
     * Validate the input.
     */
    AssertReturn(enmType > RTSYMLINKTYPE_INVALID && enmType < RTSYMLINKTYPE_END, VERR_INVALID_PARAMETER);
    AssertPtrReturn(pszSymlink, VERR_INVALID_POINTER);
    AssertPtrReturn(pszTarget, VERR_INVALID_POINTER);

    /*
     * Convert the paths.
     */
    char const *pszNativeSymlink;
    int rc = rtPathToNative(&pszNativeSymlink, pszSymlink, NULL);
    if (RT_SUCCESS(rc))
    {
        const char *pszNativeTarget;
        rc = rtPathToNative(&pszNativeTarget, pszTarget, NULL);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create the link.
             */
            if (symlink(pszNativeTarget, pszNativeSymlink) == 0)
                rc = VINF_SUCCESS;
            else
                rc = RTErrConvertFromErrno(errno);

            rtPathFreeNative(pszNativeTarget, pszTarget);
        }
        rtPathFreeNative(pszNativeSymlink, pszSymlink);
    }

    LogFlow(("RTSymlinkCreate(%p={%s}, %p={%s}, %d, %#x): returns %Rrc\n",
             pszSymlink, pszSymlink, pszTarget, pszTarget, enmType, fCreate, rc));
    return rc;
}
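/*
 * A minimal usage sketch for the API above, assuming a POSIX host and the
 * public IPRT headers; the paths and function name are illustrative.
 */
#include <iprt/symlink.h>
#include <iprt/log.h>

static int createConfigLinkSketch(void)
{
    /* "/tmp/cfg-link" and "/etc/myapp.conf" are illustrative paths. */
    int rc = RTSymlinkCreate("/tmp/cfg-link", "/etc/myapp.conf", RTSYMLINKTYPE_FILE, 0 /*fCreate*/);
    if (RT_FAILURE(rc))
        LogRel(("Failed to create symlink: %Rrc\n", rc));
    return rc;
}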
RTDECL(int) RTDirRemove(const char *pszPath)
{
    /*
     * Convert to UTF-16.
     */
    PRTUTF16 pwszString;
    int rc = RTStrToUtf16(pszPath, &pwszString);
    AssertRC(rc);
    if (RT_SUCCESS(rc))
    {
        /*
         * Remove the directory.
         */
        if (RemoveDirectoryW((LPCWSTR)pwszString))
            rc = VINF_SUCCESS;
        else
            rc = RTErrConvertFromWin32(GetLastError());

        RTUtf16Free(pwszString);
    }

    LogFlow(("RTDirRemove(%p:{%s}): returns %Rrc\n", pszPath, pszPath, rc));
    return rc;
}
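/*
 * A minimal usage sketch; the path is illustrative, and only an empty
 * directory can be removed this way. It assumes RTErrConvertFromWin32()
 * maps ERROR_DIR_NOT_EMPTY to VERR_DIR_NOT_EMPTY.
 */
static int removeTempDirSketch(void)
{
    int rc = RTDirRemove("C:\\Temp\\scratch");
    if (rc == VERR_DIR_NOT_EMPTY)
        LogRel(("Directory still has contents, not removed\n"));
    else if (RT_FAILURE(rc))
        LogRel(("RTDirRemove failed: %Rrc\n", rc));
    return rc;
}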
/**
 * The client driver IOCtl wrapper function.
 *
 * @returns VBox status code.
 * @param   pDevSol     The Solaris device instance.
 * @param   Function    The IOCtl function number.
 * @param   pvData      Opaque pointer to the data.
 * @param   cbData      Size of the data pointed to by pvData.
 */
static int usbProxySolarisIOCtl(PUSBPROXYDEVSOL pDevSol, unsigned Function, void *pvData, size_t cbData)
{
    if (RT_UNLIKELY(pDevSol->hFile == NIL_RTFILE))
    {
        LogFlow((USBPROXY ":usbProxySolarisIOCtl connection to driver gone!\n"));
        return VERR_VUSB_DEVICE_NOT_ATTACHED;
    }

    VBOXUSBREQ Req;
    Req.u32Magic = VBOXUSB_MAGIC;
    Req.rc       = -1;
    Req.cbData   = cbData;
    Req.pvDataR3 = pvData;

    int Ret = -1;
    int rc = RTFileIoCtl(pDevSol->hFile, Function, &Req, sizeof(Req), &Ret);
    if (RT_SUCCESS(rc))
    {
        if (RT_FAILURE(Req.rc))
        {
            if (Req.rc == VERR_VUSB_DEVICE_NOT_ATTACHED)
            {
                pDevSol->pProxyDev->fDetached = true;
                usbProxySolarisCloseFile(pDevSol);
                LogRel((USBPROXY ":Command %#x failed, USB Device '%s' disconnected!\n",
                        Function, pDevSol->pProxyDev->pUsbIns->pszName));
            }
            else
                LogRel((USBPROXY ":Command %#x failed. Req.rc=%Rrc\n", Function, Req.rc));
        }

        return Req.rc;
    }

    LogRel((USBPROXY ":Function %#x failed. rc=%Rrc\n", Function, rc));
    return rc;
}
DECLCALLBACK(int) HostPowerServiceDarwin::powerChangeNotificationThread(RTTHREAD /* ThreadSelf */, void *pInstance)
{
    HostPowerServiceDarwin *pPowerObj = static_cast<HostPowerServiceDarwin *>(pInstance);

    /* We have to set the battery's critical state initially, because we don't
     * want the HostPowerService to report that state when a VM starts.
     * See lowPowerHandler for more info. */
    pPowerObj->checkBatteryCriticalLevel();

    /* Register to receive system sleep notifications. */
    pPowerObj->mRootPort = IORegisterForSystemPower(pPowerObj, &pPowerObj->mNotifyPort,
                                                    HostPowerServiceDarwin::powerChangeNotificationHandler,
                                                    &pPowerObj->mNotifierObject);
    if (pPowerObj->mRootPort == MACH_PORT_NULL)
    {
        LogFlow(("IORegisterForSystemPower failed\n"));
        return VERR_NOT_SUPPORTED;
    }
    pPowerObj->mRunLoop = CFRunLoopGetCurrent();

    /* Add the notification port to the application run loop. */
    CFRunLoopAddSource(pPowerObj->mRunLoop,
                       IONotificationPortGetRunLoopSource(pPowerObj->mNotifyPort),
                       kCFRunLoopCommonModes);

    /* Register for all battery change events. The handler will check for low
     * power events itself. */
    CFRunLoopSourceRef runLoopSource = IOPSNotificationCreateRunLoopSource(HostPowerServiceDarwin::lowPowerHandler,
                                                                           pPowerObj);
    CFRunLoopAddSource(pPowerObj->mRunLoop, runLoopSource, kCFRunLoopCommonModes);

    /* Start the run loop. This blocks. */
    CFRunLoopRun();

    return VINF_SUCCESS;
}
static uint32_t kstatGet(const char *name)
{
    kstat_ctl_t *kc;
    uint32_t uSpeed = 0;

    if ((kc = kstat_open()) == 0)
    {
        LogRel(("kstat_open() -> %d\n", errno));
        return 0;
    }

    kstat_t *ksAdapter = kstat_lookup(kc, "link", -1, (char *)name);
    if (ksAdapter == 0)
    {
        char szModule[KSTAT_STRLEN];
        uint32_t uInstance = getInstance(name, szModule);
        ksAdapter = kstat_lookup(kc, szModule, uInstance, "phys");
        if (ksAdapter == 0)
            ksAdapter = kstat_lookup(kc, szModule, uInstance, (char *)name);
    }
    if (ksAdapter == 0)
        LogRel(("Failed to get network statistics for %s\n", name));
    else if (kstat_read(kc, ksAdapter, 0) == -1)
        LogRel(("kstat_read(%s) -> %d\n", name, errno));
    else
    {
        kstat_named_t *kn;
        if ((kn = (kstat_named_t *)kstat_data_lookup(ksAdapter, (char *)"ifspeed")) == 0)
            LogRel(("kstat_data_lookup(ifspeed) -> %d, name=%s\n", errno, name));
        else
            uSpeed = kn->value.ul / 1000000; /* bits/s -> Mbit/s */
    }
    kstat_close(kc);
    LogFlow(("kstatGet(%s) -> %u Mbit/s\n", name, uSpeed));
    return uSpeed;
}
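/*
 * A minimal usage sketch for the file-local helper above; the adapter name
 * "e1000g0" and the function name are illustrative.
 */
static void reportLinkSpeedSketch(void)
{
    uint32_t uMbits = kstatGet("e1000g0");
    if (uMbits)
        LogRel(("Link speed: %u Mbit/s\n", uMbits));
    else
        LogRel(("Link speed unknown (kstat lookup failed)\n"));
}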
/**
 * Reap URBs in-flight on a device.
 *
 * @returns Pointer to a completed URB.
 * @returns NULL if no URB was completed.
 * @param   pProxyDev   The device.
 * @param   cMillies    Number of milliseconds to wait. Use 0 to not wait at all.
 */
static DECLCALLBACK(PVUSBURB) usbProxyFreeBSDUrbReap(PUSBPROXYDEV pProxyDev, RTMSINTERVAL cMillies)
{
    struct usb_fs_endpoint *pXferEndpoint;
    PUSBPROXYDEVFBSD pDevFBSD = USBPROXYDEV_2_DATA(pProxyDev, PUSBPROXYDEVFBSD);
    PUSBENDPOINTFBSD pEndpointFBSD;
    PVUSBURB pUrb;
    struct usb_fs_complete UsbFsComplete;
    struct pollfd pfd[2];
    int rc;

    LogFlow(("usbProxyFreeBSDUrbReap: pProxyDev=%p, cMillies=%u\n",
             pProxyDev, cMillies));

repeat:

    pUrb = NULL;

    /* Check for cancelled transfers. */
    if (pDevFBSD->fCancelling)
    {
        for (unsigned n = 0; n < USBFBSD_MAXENDPOINTS; n++)
        {
            pEndpointFBSD = &pDevFBSD->aSwEndpoint[n];
            if (pEndpointFBSD->fCancelling)
            {
                pEndpointFBSD->fCancelling = false;
                pUrb = pEndpointFBSD->pUrb;
                pEndpointFBSD->pUrb = NULL;

                if (pUrb != NULL)
                    break;
            }
        }

        if (pUrb != NULL)
        {
            pUrb->enmStatus = VUSBSTATUS_INVALID;
            pUrb->Dev.pvPrivate = NULL;

            switch (pUrb->enmType)
            {
                case VUSBXFERTYPE_MSG:
                    pUrb->cbData = 0;
                    break;
                case VUSBXFERTYPE_ISOC:
                    pUrb->cbData = 0;
                    for (int n = 0; n < (int)pUrb->cIsocPkts; n++)
                        pUrb->aIsocPkts[n].cb = 0;
                    break;
                default:
                    pUrb->cbData = 0;
                    break;
            }
            return pUrb;
        }
        pDevFBSD->fCancelling = false;
    }

    /* Zero default. */
    memset(&UsbFsComplete, 0, sizeof(UsbFsComplete));

    /* Check if any endpoints are complete. */
    rc = usbProxyFreeBSDDoIoCtl(pProxyDev, USB_FS_COMPLETE, &UsbFsComplete, true);
    if (RT_SUCCESS(rc))
    {
        pXferEndpoint = &pDevFBSD->aHwEndpoint[UsbFsComplete.ep_index];
        pEndpointFBSD = &pDevFBSD->aSwEndpoint[UsbFsComplete.ep_index];

        LogFlow(("usbProxyFreeBSDUrbReap: Reaped URB %#p\n", pEndpointFBSD->pUrb));

        if (pXferEndpoint->status == USB_ERR_CANCELLED)
            goto repeat;

        pUrb = pEndpointFBSD->pUrb;
        pEndpointFBSD->pUrb = NULL;
        if (pUrb == NULL)
            goto repeat;

        switch (pXferEndpoint->status)
        {
            case USB_ERR_NORMAL_COMPLETION:
                pUrb->enmStatus = VUSBSTATUS_OK;
                break;
            case USB_ERR_STALLED:
                pUrb->enmStatus = VUSBSTATUS_STALL;
                break;
            default:
                pUrb->enmStatus = VUSBSTATUS_INVALID;
                break;
        }
        pUrb->Dev.pvPrivate = NULL;

        switch (pUrb->enmType)
        {
            case VUSBXFERTYPE_MSG:
                pUrb->cbData = pEndpointFBSD->acbData[0] + pEndpointFBSD->acbData[1];
                break;
            case VUSBXFERTYPE_ISOC:
            {
                int n;

                if (pUrb->enmDir == VUSBDIRECTION_OUT)
                    break;
                pUrb->cbData = 0;
                for (n = 0; n < (int)pUrb->cIsocPkts; n++)
                {
                    if (n >= (int)pEndpointFBSD->cMaxFrames)
                        break;
                    pUrb->cbData += pEndpointFBSD->acbData[n];
                    pUrb->aIsocPkts[n].cb = pEndpointFBSD->acbData[n];
                }
                for (; n < (int)pUrb->cIsocPkts; n++)
                    pUrb->aIsocPkts[n].cb = 0;
                break;
            }
            default:
                pUrb->cbData = pEndpointFBSD->acbData[0];
                break;
        }

        LogFlow(("usbProxyFreeBSDUrbReap: Status=%d epindex=%u len[0]=%d len[1]=%d\n",
                 (int)pXferEndpoint->status, (unsigned)UsbFsComplete.ep_index,
                 (unsigned)pEndpointFBSD->acbData[0], (unsigned)pEndpointFBSD->acbData[1]));
    }
    else if (cMillies != 0 && rc == VERR_RESOURCE_BUSY)
    {
        for (;;)
        {
            pfd[0].fd = RTFileToNative(pDevFBSD->hFile);
            pfd[0].events = POLLIN | POLLRDNORM;
            pfd[0].revents = 0;

            pfd[1].fd = RTPipeToNative(pDevFBSD->hPipeWakeupR);
            pfd[1].events = POLLIN | POLLRDNORM;
            pfd[1].revents = 0;

            rc = poll(pfd, 2, (cMillies == RT_INDEFINITE_WAIT) ? INFTIM : cMillies);
            if (rc > 0)
            {
                if (pfd[1].revents & POLLIN)
                {
                    /* Got woken up; drain the pipe. */
                    uint8_t bRead;
                    size_t cbIgnored = 0;
                    RTPipeRead(pDevFBSD->hPipeWakeupR, &bRead, 1, &cbIgnored);

                    /* Make sure we return from this function. */
                    cMillies = 0;
                }
                break;
            }
            if (rc == 0)
                return NULL;
            if (errno != EAGAIN)
                return NULL;
        }
        goto repeat;
    }
    return pUrb;
}
static int usbProxyFreeBSDEndpointOpen(PUSBPROXYDEV pProxyDev, int Endpoint, bool fIsoc, int index)
{
    PUSBPROXYDEVFBSD pDevFBSD = USBPROXYDEV_2_DATA(pProxyDev, PUSBPROXYDEVFBSD);
    PUSBENDPOINTFBSD pEndpointFBSD = NULL; /* shut up gcc */
    struct usb_fs_endpoint *pXferEndpoint;
    struct usb_fs_open UsbFsOpen;
    int rc;

    LogFlow(("usbProxyFreeBSDEndpointOpen: pProxyDev=%p Endpoint=%d\n",
             (void *)pProxyDev, Endpoint));

    for (; index < USBFBSD_MAXENDPOINTS; index++)
    {
        pEndpointFBSD = &pDevFBSD->aSwEndpoint[index];
        if (pEndpointFBSD->fCancelling)
            continue;
        if (   pEndpointFBSD->fOpen
            && !pEndpointFBSD->pUrb
            && (int)pEndpointFBSD->iEpNum == Endpoint)
            return index;
    }

    if (index == USBFBSD_MAXENDPOINTS)
    {
        for (index = 0; index != USBFBSD_MAXENDPOINTS; index++)
        {
            pEndpointFBSD = &pDevFBSD->aSwEndpoint[index];
            if (pEndpointFBSD->fCancelling)
                continue;
            if (!pEndpointFBSD->fOpen)
                break;
        }
        if (index == USBFBSD_MAXENDPOINTS)
            return -1;
    }

    /* Set ppBuffer and pLength. */
    pXferEndpoint = &pDevFBSD->aHwEndpoint[index];
    pXferEndpoint->ppBuffer = &pEndpointFBSD->apvData[0];
    pXferEndpoint->pLength  = &pEndpointFBSD->acbData[0];

    LogFlow(("usbProxyFreeBSDEndpointOpen: ep_index=%d ep_num=%d\n",
             index, Endpoint));

    memset(&UsbFsOpen, 0, sizeof(UsbFsOpen));

    UsbFsOpen.ep_index    = index;
    UsbFsOpen.ep_no       = Endpoint;
    UsbFsOpen.max_bufsize = 256 * 1024;
    /* Hardcoded assumption about the URBs we get. */
    UsbFsOpen.max_frames  = fIsoc ? USBFBSD_MAXFRAMES : 2;

    rc = usbProxyFreeBSDDoIoCtl(pProxyDev, USB_FS_OPEN, &UsbFsOpen, true);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_RESOURCE_BUSY)
            LogFlow(("usbProxyFreeBSDEndpointOpen: EBUSY\n"));
        return -1;
    }
    pEndpointFBSD->fOpen      = true;
    pEndpointFBSD->pUrb       = NULL;
    pEndpointFBSD->iEpNum     = Endpoint;
    pEndpointFBSD->cMaxIo     = UsbFsOpen.max_bufsize;
    pEndpointFBSD->cMaxFrames = UsbFsOpen.max_frames;

    return index;
}
/**
 * Executes an instruction in HM mode if we can.
 *
 * This is somewhat comparable to REMR3EmulateInstruction.
 *
 * @returns VBox strict status code.
 * @retval  VINF_EM_DBG_STEPPED on success.
 * @retval  VERR_EM_CANNOT_EXEC_GUEST if we cannot execute guest instructions in
 *          HM right now.
 *
 * @param   pVM     The cross context VM structure.
 * @param   pVCpu   The cross context virtual CPU structure for the calling EMT.
 * @param   fFlags  Combinations of EM_ONE_INS_FLAGS_XXX.
 * @thread  EMT.
 */
VMMR3_INT_DECL(VBOXSTRICTRC) EMR3HmSingleInstruction(PVM pVM, PVMCPU pVCpu, uint32_t fFlags)
{
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;
    Assert(!(fFlags & ~EM_ONE_INS_FLAGS_MASK));

    if (!HMR3CanExecuteGuest(pVM, pCtx))
        return VINF_EM_RESCHEDULE;

    uint64_t const uOldRip = pCtx->rip;
    for (;;)
    {
        /*
         * Service necessary FFs before going into HM.
         */
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            VBOXSTRICTRC rcStrict = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rcStrict != VINF_SUCCESS)
            {
                Log(("EMR3HmSingleInstruction: FFs before -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
                return rcStrict;
            }
        }

        /*
         * Go execute it.
         */
        bool fOld = HMSetSingleInstruction(pVM, pVCpu, true);
        VBOXSTRICTRC rcStrict = VMMR3HmRunGC(pVM, pVCpu);
        HMSetSingleInstruction(pVM, pVCpu, fOld);
        LogFlow(("EMR3HmSingleInstruction: %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));

        /*
         * Handle high priority FFs and informational status codes. We don't do
         * normal FF processing; the caller or the next call can deal with them.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
        {
            rcStrict = emR3HighPriorityPostForcedActions(pVM, pVCpu, VBOXSTRICTRC_TODO(rcStrict));
            LogFlow(("EMR3HmSingleInstruction: FFs after -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        if (   rcStrict != VINF_SUCCESS
            && (rcStrict < VINF_EM_FIRST || rcStrict > VINF_EM_LAST))
        {
            rcStrict = emR3HmHandleRC(pVM, pVCpu, pCtx, VBOXSTRICTRC_TODO(rcStrict));
            Log(("EMR3HmSingleInstruction: emR3HmHandleRC -> %Rrc\n", VBOXSTRICTRC_VAL(rcStrict)));
        }

        /*
         * Done?
         */
        if (   (rcStrict != VINF_SUCCESS && rcStrict != VINF_EM_DBG_STEPPED)
            || !(fFlags & EM_ONE_INS_FLAGS_RIP_CHANGE)
            || pCtx->rip != uOldRip)
        {
            if (rcStrict == VINF_SUCCESS && pCtx->rip != uOldRip)
                rcStrict = VINF_EM_DBG_STEPPED;
            Log(("EMR3HmSingleInstruction: returns %Rrc (rip %llx -> %llx)\n",
                 VBOXSTRICTRC_VAL(rcStrict), uOldRip, pCtx->rip));
            return rcStrict;
        }
    }
}
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own current TSS.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestTSSWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                          RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    LogFlow(("selmRCGuestTSSWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));

    /*
     * Try emulate the access.
     */
    uint32_t cb;
    int rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
    if (RT_SUCCESS(rc) && cb)
    {
        rc = VINF_SUCCESS;

        /*
         * If it's on the same page as the esp0 and ss0 fields or actually one of them,
         * then check if any of these has changed.
         */
        PCVBOXTSS pGuestTss = (PVBOXTSS)(uintptr_t)pVM->selm.s.GCPtrGuestTss;
        if (    PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS(&pGuestTss->padding_ss0)
            &&  PAGE_ADDRESS(&pGuestTss->esp0) == PAGE_ADDRESS((uint8_t *)pGuestTss + offRange)
            &&  (    pGuestTss->esp0 !=  pVM->selm.s.Tss.esp1
                 ||  pGuestTss->ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
           )
        {
            Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv\n",
                 (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)pGuestTss->ss0, (RTGCPTR)pGuestTss->esp0));
            pVM->selm.s.Tss.esp1 = pGuestTss->esp0;
            pVM->selm.s.Tss.ss1  = pGuestTss->ss0 | 1;
            STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
        }
        /* Handle misaligned TSS in a safe manner (just in case). */
        else if (   offRange >= RT_UOFFSETOF(VBOXTSS, esp0)
                 && offRange <  RT_UOFFSETOF(VBOXTSS, padding_ss0))
        {
            struct
            {
                uint32_t esp0;
                uint16_t ss0;
                uint16_t padding_ss0;
            } s;
            AssertCompileSize(s, 8);
            rc = selmRCReadTssBits(pVM, &s, &pGuestTss->esp0, sizeof(s));
            if (    rc == VINF_SUCCESS
                &&  (    s.esp0 !=  pVM->selm.s.Tss.esp1
                     ||  s.ss0  != (pVM->selm.s.Tss.ss1 & ~1)) /* undo raw-r0 */
               )
            {
                Log(("selmRCGuestTSSWriteHandler: R0 stack: %RTsel:%RGv -> %RTsel:%RGv [x-page]\n",
                     (RTSEL)(pVM->selm.s.Tss.ss1 & ~1), (RTGCPTR)pVM->selm.s.Tss.esp1, (RTSEL)s.ss0, (RTGCPTR)s.esp0));
                pVM->selm.s.Tss.esp1 = s.esp0;
                pVM->selm.s.Tss.ss1  = s.ss0 | 1;
                STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandledChanged);
            }
        }

        /*
         * If VME is enabled we need to check if the interrupt redirection bitmap
         * needs updating.
         */
        if (    offRange >= RT_UOFFSETOF(VBOXTSS, offIoBitmap)
            &&  (CPUMGetGuestCR4(pVCpu) & X86_CR4_VME))
        {
            if (offRange - RT_UOFFSETOF(VBOXTSS, offIoBitmap) < sizeof(pGuestTss->offIoBitmap))
            {
                uint16_t offIoBitmap = pGuestTss->offIoBitmap;
                if (offIoBitmap != pVM->selm.s.offGuestIoBitmap)
                {
                    Log(("TSS offIoBitmap changed: old=%#x new=%#x -> resync in ring-3\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
                    VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
                }
                else
                    Log(("TSS offIoBitmap: old=%#x new=%#x [unchanged]\n", pVM->selm.s.offGuestIoBitmap, offIoBitmap));
            }
            else
            {
                /** @todo not sure how the partial case is handled; probably not allowed */
                uint32_t offIntRedirBitmap = pVM->selm.s.offGuestIoBitmap - sizeof(pVM->selm.s.Tss.IntRedirBitmap);
                if (   offIntRedirBitmap <= offRange
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) >= offRange + cb
                    && offIntRedirBitmap + sizeof(pVM->selm.s.Tss.IntRedirBitmap) <= pVM->selm.s.cbGuestTss)
                {
                    Log(("TSS IntRedirBitmap Changed: offIoBitmap=%x offIntRedirBitmap=%x cbTSS=%x offRange=%x cb=%x\n",
                         pVM->selm.s.offGuestIoBitmap, offIntRedirBitmap, pVM->selm.s.cbGuestTss, offRange, cb));

                    /** @todo only update the changed part. */
                    for (uint32_t i = 0; i < sizeof(pVM->selm.s.Tss.IntRedirBitmap) / 8; i++)
                    {
                        rc = selmRCReadTssBits(pVM, &pVM->selm.s.Tss.IntRedirBitmap[i * 8],
                                               (uint8_t *)pGuestTss + offIntRedirBitmap + i * 8, 8);
                        if (rc != VINF_SUCCESS)
                            break;
                    }
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSRedir);
                }
            }
        }

        /* Return to ring-3 for a full resync if any of the above fails... (?) */
        if (rc != VINF_SUCCESS)
        {
            VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
            VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
            if (RT_SUCCESS(rc))
                rc = VINF_SUCCESS;
        }

        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSHandled);
    }
    else
    {
        Assert(RT_FAILURE(rc));
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_TSS);
        STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestTSSUnhandled);
        if (rc == VERR_EM_INTERPRETER)
            rc = VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT;
    }
    return rc;
}
/**
 * @interface_method_impl{PDMDRVREG,pfnConstruct}
 */
DECLCALLBACK(int) VMMDev::drvConstruct(PPDMDRVINS pDrvIns, PCFGMNODE pCfgHandle, uint32_t fFlags)
{
    PDMDRV_CHECK_VERSIONS_RETURN(pDrvIns);
    PDRVMAINVMMDEV pThis = PDMINS_2_DATA(pDrvIns, PDRVMAINVMMDEV);
    LogFlow(("VMMDev::drvConstruct: iInstance=%d\n", pDrvIns->iInstance));

    /*
     * Validate configuration.
     */
    if (!CFGMR3AreValuesValid(pCfgHandle, "Object\0"))
        return VERR_PDM_DRVINS_UNKNOWN_CFG_VALUES;
    AssertMsgReturn(PDMDrvHlpNoAttach(pDrvIns) == VERR_PDM_NO_ATTACHED_DRIVER,
                    ("Configuration error: Not possible to attach anything to this driver!\n"),
                    VERR_PDM_DRVINS_NO_ATTACH);

    /*
     * IBase.
     */
    pDrvIns->IBase.pfnQueryInterface                  = VMMDev::drvQueryInterface;

    pThis->Connector.pfnUpdateGuestStatus             = vmmdevUpdateGuestStatus;
    pThis->Connector.pfnUpdateGuestUserState          = vmmdevUpdateGuestUserState;
    pThis->Connector.pfnUpdateGuestInfo               = vmmdevUpdateGuestInfo;
    pThis->Connector.pfnUpdateGuestInfo2              = vmmdevUpdateGuestInfo2;
    pThis->Connector.pfnUpdateGuestCapabilities       = vmmdevUpdateGuestCapabilities;
    pThis->Connector.pfnUpdateMouseCapabilities       = vmmdevUpdateMouseCapabilities;
    pThis->Connector.pfnUpdatePointerShape            = vmmdevUpdatePointerShape;
    pThis->Connector.pfnVideoAccelEnable              = iface_VideoAccelEnable;
    pThis->Connector.pfnVideoAccelFlush               = iface_VideoAccelFlush;
    pThis->Connector.pfnVideoModeSupported            = vmmdevVideoModeSupported;
    pThis->Connector.pfnGetHeightReduction            = vmmdevGetHeightReduction;
    pThis->Connector.pfnSetCredentialsJudgementResult = vmmdevSetCredentialsJudgementResult;
    pThis->Connector.pfnSetVisibleRegion              = vmmdevSetVisibleRegion;
    pThis->Connector.pfnQueryVisibleRegion            = vmmdevQueryVisibleRegion;
    pThis->Connector.pfnReportStatistics              = vmmdevReportStatistics;
    pThis->Connector.pfnQueryStatisticsInterval       = vmmdevQueryStatisticsInterval;
    pThis->Connector.pfnQueryBalloonSize              = vmmdevQueryBalloonSize;
    pThis->Connector.pfnIsPageFusionEnabled           = vmmdevIsPageFusionEnabled;

#ifdef VBOX_WITH_HGCM
    pThis->HGCMConnector.pfnConnect                   = iface_hgcmConnect;
    pThis->HGCMConnector.pfnDisconnect                = iface_hgcmDisconnect;
    pThis->HGCMConnector.pfnCall                      = iface_hgcmCall;
#endif

    /*
     * Get the IVMMDevPort interface of the above driver/device.
     */
    pThis->pUpPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMIVMMDEVPORT);
    AssertMsgReturn(pThis->pUpPort, ("Configuration error: No VMMDev port interface above!\n"),
                    VERR_PDM_MISSING_INTERFACE_ABOVE);

#ifdef VBOX_WITH_HGCM
    pThis->pHGCMPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMIHGCMPORT);
    AssertMsgReturn(pThis->pHGCMPort, ("Configuration error: No HGCM port interface above!\n"),
                    VERR_PDM_MISSING_INTERFACE_ABOVE);
#endif

    /*
     * Get the Console object pointer and update the mpDrv member.
     */
    void *pv;
    int rc = CFGMR3QueryPtr(pCfgHandle, "Object", &pv);
    if (RT_FAILURE(rc))
    {
        AssertMsgFailed(("Configuration error: No/bad \"Object\" value! rc=%Rrc\n", rc));
        return rc;
    }
    pThis->pVMMDev = (VMMDev *)pv;        /** @todo Check this cast! */
    pThis->pVMMDev->mpDrv = pThis;

#ifdef VBOX_WITH_HGCM
    rc = pThis->pVMMDev->hgcmLoadService(VBOXSHAREDFOLDERS_DLL, "VBoxSharedFolders");
    pThis->pVMMDev->fSharedFolderActive = RT_SUCCESS(rc);
    if (RT_SUCCESS(rc))
    {
        PPDMLED       pLed;
        PPDMILEDPORTS pLedPort;

        LogRel(("Shared Folders service loaded.\n"));
        pLedPort = PDMIBASE_QUERY_INTERFACE(pDrvIns->pUpBase, PDMILEDPORTS);
        AssertMsgReturn(pLedPort, ("Configuration error: No LED port interface above!\n"),
                        VERR_PDM_MISSING_INTERFACE_ABOVE);
        rc = pLedPort->pfnQueryStatusLed(pLedPort, 0, &pLed);
        if (RT_SUCCESS(rc) && pLed)
        {
            VBOXHGCMSVCPARM parm;

            parm.type = VBOX_HGCM_SVC_PARM_PTR;
            parm.u.pointer.addr = pLed;
            parm.u.pointer.size = sizeof(*pLed);

            rc = HGCMHostCall("VBoxSharedFolders", SHFL_FN_SET_STATUS_LED, 1, &parm);
        }
        else
            AssertMsgFailed(("pfnQueryStatusLed failed with %Rrc (pLed=%x)\n", rc, pLed));
    }
    else
        LogRel(("Failed to load Shared Folders service %Rrc\n", rc));

    rc = PDMDrvHlpSSMRegisterEx(pDrvIns, HGCM_SSM_VERSION, 4096 /* bad guess */,
                                NULL, NULL, NULL,
                                NULL, iface_hgcmSave, NULL,
                                NULL, iface_hgcmLoad, NULL);
    if (RT_FAILURE(rc))
        return rc;
#endif /* VBOX_WITH_HGCM */

    return VINF_SUCCESS;
}
static DECLCALLBACK(int) drvHostALSAAudioCaptureIn(PPDMIHOSTAUDIO pInterface, PPDMAUDIOHSTSTRMIN pHstStrmIn,
                                                   uint32_t *pcSamplesCaptured)
{
    NOREF(pInterface);
    AssertPtrReturn(pHstStrmIn, VERR_INVALID_POINTER);

    PALSAAUDIOSTREAMIN pThisStrmIn = (PALSAAUDIOSTREAMIN)pHstStrmIn;

    snd_pcm_sframes_t cAvail;
    int rc = drvHostALSAAudioGetAvail(pThisStrmIn->phPCM, &cAvail);
    if (RT_FAILURE(rc))
    {
        LogFunc(("Error getting number of captured frames, rc=%Rrc\n", rc));
        return rc;
    }

    if (!cAvail) /* No data yet? */
    {
        snd_pcm_state_t state = snd_pcm_state(pThisStrmIn->phPCM);
        switch (state)
        {
            case SND_PCM_STATE_PREPARED:
                cAvail = AudioMixBufFree(&pHstStrmIn->MixBuf);
                break;

            case SND_PCM_STATE_SUSPENDED:
            {
                rc = drvHostALSAAudioResume(pThisStrmIn->phPCM);
                if (RT_FAILURE(rc))
                    break;

                LogFlow(("Resuming suspended input stream\n"));
                break;
            }

            default:
                LogFlow(("No frames available, state=%d\n", state));
                break;
        }

        if (!cAvail)
        {
            if (pcSamplesCaptured)
                *pcSamplesCaptured = 0;
            return VINF_SUCCESS;
        }
    }

    /*
     * Check how much we can read from the capture device without overflowing
     * the mixer buffer.
     */
    Assert(cAvail);
    size_t cbMixFree = AudioMixBufFreeBytes(&pHstStrmIn->MixBuf);
    size_t cbToRead  = RT_MIN((size_t)AUDIOMIXBUF_S2B(&pHstStrmIn->MixBuf, cAvail), cbMixFree);

    LogFlowFunc(("cbToRead=%zu, cAvail=%RI32\n", cbToRead, cAvail));

    uint32_t cWrittenTotal = 0;
    snd_pcm_uframes_t cToRead;
    snd_pcm_sframes_t cRead;

    while (   cbToRead
           && RT_SUCCESS(rc))
    {
        cToRead = RT_MIN(AUDIOMIXBUF_B2S(&pHstStrmIn->MixBuf, cbToRead),
                         AUDIOMIXBUF_B2S(&pHstStrmIn->MixBuf, pThisStrmIn->cbBuf));
        AssertBreakStmt(cToRead, rc = VERR_NO_DATA);
        cRead = snd_pcm_readi(pThisStrmIn->phPCM, pThisStrmIn->pvBuf, cToRead);
        if (cRead <= 0)
        {
            switch (cRead)
            {
                case 0:
                {
                    LogFunc(("No input frames available\n"));
                    rc = VERR_ACCESS_DENIED;
                    break;
                }

                case -EAGAIN:
                    /*
                     * Don't set error here because EAGAIN means there are no further frames
                     * available at the moment, try later. As we might have read some frames
                     * already these need to be processed instead.
                     */
                    cbToRead = 0;
                    break;

                case -EPIPE:
                {
                    rc = drvHostALSAAudioRecover(pThisStrmIn->phPCM);
                    if (RT_FAILURE(rc))
                        break;

                    LogFlowFunc(("Recovered from capturing\n"));
                    continue;
                }

                default:
                    LogFunc(("Failed to read input frames: %s\n", snd_strerror(cRead)));
                    rc = VERR_GENERAL_FAILURE; /** @todo Fudge! */
                    break;
            }
        }
        else
        {
            uint32_t cWritten;
            rc = AudioMixBufWriteCirc(&pHstStrmIn->MixBuf,
                                      pThisStrmIn->pvBuf, AUDIOMIXBUF_S2B(&pHstStrmIn->MixBuf, cRead),
                                      &cWritten);
            if (RT_FAILURE(rc))
                break;

            /*
             * We should not run into a full mixer buffer or we lose samples and
             * run into an endless loop if ALSA keeps producing samples ("null"
             * capture device for example).
             */
            AssertLogRelMsgBreakStmt(cWritten > 0, ("Mixer buffer shouldn't be full at this point!\n"),
                                     rc = VERR_INTERNAL_ERROR);
            uint32_t cbWritten = AUDIOMIXBUF_S2B(&pHstStrmIn->MixBuf, cWritten);

            Assert(cbToRead >= cbWritten);
            cbToRead      -= cbWritten;
            cWrittenTotal += cWritten;
        }
    }

    if (RT_SUCCESS(rc))
    {
        uint32_t cProcessed = 0;
        if (cWrittenTotal)
            rc = AudioMixBufMixToParent(&pHstStrmIn->MixBuf, cWrittenTotal,
                                        &cProcessed);

        if (pcSamplesCaptured)
            *pcSamplesCaptured = cWrittenTotal;

        LogFlowFunc(("cWrittenTotal=%RU32 (%RU32 processed), rc=%Rrc\n",
                     cWrittenTotal, cProcessed, rc));
    }

    LogFlowFuncLeaveRC(rc);
    return rc;
}
/**
 * Halted VM Wait.
 * Any external event will unblock the thread.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pVM                 Pointer to the VM.
 * @param   pVCpu               Pointer to the VMCPU.
 * @param   fIgnoreInterrupts   If set, the VMCPU_FF_INTERRUPT_APIC/PIC flags are ignored.
 * @thread  The emulation thread.
 */
VMMR3DECL(int) VMR3WaitHalted(PVM pVM, PVMCPU pVCpu, bool fIgnoreInterrupts)
{
    LogFlow(("VMR3WaitHalted: fIgnoreInterrupts=%d\n", fIgnoreInterrupts));

    /*
     * Check Relevant FFs.
     */
    const uint32_t fMask = !fIgnoreInterrupts
        ? VMCPU_FF_EXTERNAL_HALTED_MASK
        : VMCPU_FF_EXTERNAL_HALTED_MASK & ~(VMCPU_FF_INTERRUPT_APIC | VMCPU_FF_INTERRUPT_PIC);
    if (    VM_FF_ISPENDING(pVM, VM_FF_EXTERNAL_HALTED_MASK)
        ||  VMCPU_FF_ISPENDING(pVCpu, fMask))
    {
        LogFlow(("VMR3WaitHalted: returns VINF_SUCCESS (FF %#x FFCPU %#x)\n", pVM->fGlobalForcedActions, pVCpu->fLocalForcedActions));
        return VINF_SUCCESS;
    }

    /*
     * The yielder is suspended while we're halting, while TM might have clock(s) running
     * only at certain times and need to be notified.
     */
    if (pVCpu->idCpu == 0)
        VMMR3YieldSuspend(pVM);
    TMNotifyStartOfHalt(pVCpu);

    /*
     * Record halt averages for the last second.
     */
    PUVMCPU pUVCpu = pVCpu->pUVCpu;
    uint64_t u64Now = RTTimeNanoTS();
    int64_t off = u64Now - pUVCpu->vm.s.u64HaltsStartTS;
    if (off > 1000000000)
    {
        if (off > _4G || !pUVCpu->vm.s.cHalts)
        {
            pUVCpu->vm.s.HaltInterval = 1000000000 /* 1 sec */;
            pUVCpu->vm.s.HaltFrequency = 1;
        }
        else
        {
            pUVCpu->vm.s.HaltInterval = (uint32_t)off / pUVCpu->vm.s.cHalts;
            pUVCpu->vm.s.HaltFrequency = ASMMultU64ByU32DivByU32(pUVCpu->vm.s.cHalts, 1000000000, (uint32_t)off);
        }
        pUVCpu->vm.s.u64HaltsStartTS = u64Now;
        pUVCpu->vm.s.cHalts = 0;
    }
    pUVCpu->vm.s.cHalts++;

    /*
     * Do the halt.
     */
    Assert(VMCPU_GET_STATE(pVCpu) == VMCPUSTATE_STARTED);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED_HALTED);
    PUVM pUVM = pUVCpu->pUVM;
    int rc = g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnHalt(pUVCpu, fMask, u64Now);
    VMCPU_SET_STATE(pVCpu, VMCPUSTATE_STARTED);

    /*
     * Notify TM and resume the yielder.
     */
    TMNotifyEndOfHalt(pVCpu);
    if (pVCpu->idCpu == 0)
        VMMR3YieldResume(pVM);

    LogFlow(("VMR3WaitHalted: returns %Rrc (FF %#x)\n", rc, pVM->fGlobalForcedActions));
    return rc;
}
uint32_t hgcmObjMake(HGCMObject *pObject, uint32_t u32HandleIn)
{
    uint32_t handle = 0;

    LogFlow(("MAIN::hgcmObjMake: pObject %p\n", pObject));

    int rc = hgcmObjEnter();

    if (RT_SUCCESS(rc))
    {
        ObjectAVLCore *pCore = &pObject->m_core;

        /* Generate a new handle value. */
        uint32_t volatile *pu32HandleCountSource = pObject->Type() == HGCMOBJ_CLIENT
                                                 ? &g_u32ClientHandleCount
                                                 : &g_u32InternalHandleCount;

        uint32_t u32Start = *pu32HandleCountSource;

        for (;;)
        {
            uint32_t Key;

            if (u32HandleIn == 0)
            {
                Key = ASMAtomicIncU32(pu32HandleCountSource);

                if (Key == u32Start)
                {
                    /* Rollover. Something is wrong. */
                    AssertReleaseFailed();
                    break;
                }

                /* 0 and 0x80000000 are not valid handles. */
                if ((Key & 0x7FFFFFFF) == 0)
                {
                    /* Over the invalid value, reinitialize the source. */
                    *pu32HandleCountSource = pObject->Type() == HGCMOBJ_CLIENT
                                           ? 0
                                           : 0x80000000;
                    continue;
                }
            }
            else
            {
                Key = u32HandleIn;
            }

            /* Insert object to AVL tree. */
            pCore->AvlCore.Key = Key;

            bool bRC = RTAvlULInsert(&g_pTree, &pCore->AvlCore);

            /* Could not insert a handle. */
            if (!bRC)
            {
                if (u32HandleIn == 0)
                {
                    /* Try another generated handle. */
                    continue;
                }
                /* Could not use the specified handle. */
                break;
            }

            /* Initialize backlink. */
            pCore->pSelf = pObject;

            /* Reference the object for the time it resides in the tree. */
            pObject->Reference();

            /* Store returned handle. */
            handle = Key;

            Log(("Object key inserted 0x%08X\n", Key));

            break;
        }

        hgcmObjLeave();
    }
    else
    {
        AssertReleaseMsgFailed(("MAIN::hgcmObjMake: Failed to acquire object pool semaphore"));
    }

    LogFlow(("MAIN::hgcmObjMake: handle = 0x%08X, rc = %Rrc\n", handle, rc));
    return handle;
}
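/*
 * A minimal usage sketch for the function above: pass 0 as u32HandleIn to
 * have a handle generated, or a specific non-zero value to claim it.
 * hgcmObjRegisterSketch is an illustrative name, not part of the HGCM code.
 */
static uint32_t hgcmObjRegisterSketch(HGCMObject *pObject)
{
    /* Let the pool pick the next free handle. */
    uint32_t handle = hgcmObjMake(pObject, 0 /* u32HandleIn */);
    if (handle == 0)
        LogRel(("Failed to insert object %p into the handle pool\n", pObject));
    return handle;
}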
/**
 * Restores virtualized flags.
 *
 * This function is called from CPUMRawLeave(). It will update the eflags register.
 *
 * @note Only here we are allowed to switch back to guest code (without a special reason such as a trap in patch code)!!
 *
 * @param   pVM         Pointer to the VM.
 * @param   pCtxCore    The cpu context core.
 * @param   rawRC       Raw mode return code.
 * @see     @ref pg_raw
 */
VMM_INT_DECL(void) PATMRawLeave(PVM pVM, PCPUMCTXCORE pCtxCore, int rawRC)
{
    bool fPatchCode = PATMIsPatchGCAddr(pVM, pCtxCore->eip);

    /*
     * We will only be called if PATMRawEnter was previously called.
     */
    register uint32_t efl = pCtxCore->eflags.u32;
    efl = (efl & ~PATM_VIRTUAL_FLAGS_MASK) | (CTXSUFF(pVM->patm.s.pGCState)->uVMFlags & PATM_VIRTUAL_FLAGS_MASK);
    pCtxCore->eflags.u32 = efl;
    CTXSUFF(pVM->patm.s.pGCState)->uVMFlags = X86_EFL_IF;

    AssertReleaseMsg((efl & X86_EFL_IF) || fPatchCode || rawRC == VINF_PATM_PENDING_IRQ_AFTER_IRET || RT_FAILURE(rawRC),
                     ("Inconsistent state at %RRv rc=%Rrc\n", pCtxCore->eip, rawRC));
    AssertReleaseMsg(CTXSUFF(pVM->patm.s.pGCState)->fPIF || fPatchCode || RT_FAILURE(rawRC),
                     ("fPIF=%d eip=%RRv rc=%Rrc\n", CTXSUFF(pVM->patm.s.pGCState)->fPIF, pCtxCore->eip, rawRC));

#ifdef IN_RING3
    if (   (efl & X86_EFL_IF)
        && fPatchCode)
    {
        if (   rawRC < VINF_PATM_LEAVE_RC_FIRST
            || rawRC > VINF_PATM_LEAVE_RC_LAST)
        {
            /*
             * Golden rules:
             * - Don't interrupt special patch streams that replace special instructions
             * - Don't break instruction fusing (sti, pop ss, mov ss)
             * - Don't go back to an instruction that has been overwritten by a patch jump
             * - Don't interrupt an idt handler on entry (1st instruction); technically incorrect
             */
            if (CTXSUFF(pVM->patm.s.pGCState)->fPIF == 1) /* consistent patch instruction state */
            {
                PATMTRANSSTATE enmState;
                RTRCPTR pOrgInstrGC = PATMR3PatchToGCPtr(pVM, pCtxCore->eip, &enmState);

                AssertRelease(pOrgInstrGC);

                Assert(enmState != PATMTRANS_OVERWRITTEN);
                if (enmState == PATMTRANS_SAFE)
                {
                    Assert(!patmFindActivePatchByEntrypoint(pVM, pOrgInstrGC));
                    Log(("Switchback from %RRv to %RRv (Psp=%x)\n", pCtxCore->eip, pOrgInstrGC, CTXSUFF(pVM->patm.s.pGCState)->Psp));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBack);
                    pCtxCore->eip = pOrgInstrGC;
                    fPatchCode = false; /* to reset the stack ptr */

                    CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;  /* Reset this pointer; safe otherwise the state would be PATMTRANS_INHIBITIRQ */
                }
                else
                {
                    LogFlow(("Patch address %RRv can't be interrupted (state=%d)!\n", pCtxCore->eip, enmState));
                    STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
                }
            }
            else
            {
                LogFlow(("Patch address %RRv can't be interrupted (fPIF=%d)!\n", pCtxCore->eip, CTXSUFF(pVM->patm.s.pGCState)->fPIF));
                STAM_COUNTER_INC(&pVM->patm.s.StatSwitchBackFail);
            }
        }
    }
#else  /* !IN_RING3 */
    AssertMsgFailed(("!IN_RING3"));
#endif /* !IN_RING3 */

    if (!fPatchCode)
    {
        if (CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts == (RTRCPTR)pCtxCore->eip)
        {
            EMSetInhibitInterruptsPC(VMMGetCpu0(pVM), pCtxCore->eip);
        }
        CTXSUFF(pVM->patm.s.pGCState)->GCPtrInhibitInterrupts = 0;

        /* Reset the stack pointer to the top of the stack. */
#ifdef DEBUG
        if (CTXSUFF(pVM->patm.s.pGCState)->Psp != PATM_STACK_SIZE)
        {
            LogFlow(("PATMRawLeave: Reset PATM stack (Psp = %x)\n", CTXSUFF(pVM->patm.s.pGCState)->Psp));
        }
#endif
        CTXSUFF(pVM->patm.s.pGCState)->Psp = PATM_STACK_SIZE;
    }
}
/**
 * Interface that PDMR3Suspend, PDMR3PowerOff and PDMR3Reset use when they wait
 * for the handling of asynchronous notifications to complete.
 *
 * @returns VINF_SUCCESS unless a fatal error occurred. In the latter
 *          case an appropriate status code is returned.
 * @param   pUVCpu  Pointer to the user mode VMCPU structure.
 * @thread  The emulation thread.
 */
VMMR3_INT_DECL(int) VMR3AsyncPdmNotificationWaitU(PUVMCPU pUVCpu)
{
    LogFlow(("VMR3AsyncPdmNotificationWaitU:\n"));
    return VMR3WaitU(pUVCpu);
}
/**
 * Reap URBs in-flight on a device.
 *
 * @returns Pointer to a completed URB.
 * @returns NULL if no URB was completed.
 * @param   pProxyDev   The device.
 * @param   cMillies    Number of milliseconds to wait. Use 0 to not wait at all.
 */
static PVUSBURB usbProxyFreeBSDUrbReap(PUSBPROXYDEV pProxyDev, RTMSINTERVAL cMillies)
{
    struct usb_fs_endpoint *pXferEndpoint;
    PUSBPROXYDEVFBSD pDevFBSD = (PUSBPROXYDEVFBSD)pProxyDev->Backend.pv;
    PUSBENDPOINTFBSD pEndpointFBSD;
    PVUSBURB pUrb;
    struct usb_fs_complete UsbFsComplete;
    struct pollfd PollFd;
    int rc;

    LogFlow(("usbProxyFreeBSDUrbReap: pProxyDev=%p, cMillies=%u\n",
             pProxyDev, cMillies));

repeat:

    pUrb = NULL;

    /* Check for cancelled transfers. */
    if (pDevFBSD->fCancelling)
    {
        for (unsigned n = 0; n < USBFBSD_MAXENDPOINTS; n++)
        {
            pEndpointFBSD = &pDevFBSD->aSwEndpoint[n];
            if (pEndpointFBSD->fCancelling)
            {
                pEndpointFBSD->fCancelling = false;
                pUrb = pEndpointFBSD->pUrb;
                pEndpointFBSD->pUrb = NULL;

                if (pUrb != NULL)
                    break;
            }
        }

        if (pUrb != NULL)
        {
            pUrb->enmStatus = VUSBSTATUS_INVALID;
            pUrb->Dev.pvPrivate = NULL;

            switch (pUrb->enmType)
            {
                case VUSBXFERTYPE_MSG:
                    pUrb->cbData = 0;
                    break;
                case VUSBXFERTYPE_ISOC:
                    pUrb->cbData = 0;
                    for (int n = 0; n < (int)pUrb->cIsocPkts; n++)
                        pUrb->aIsocPkts[n].cb = 0;
                    break;
                default:
                    pUrb->cbData = 0;
                    break;
            }
            return pUrb;
        }
        pDevFBSD->fCancelling = false;
    }

    /* Zero default. */
    memset(&UsbFsComplete, 0, sizeof(UsbFsComplete));

    /* Check if any endpoints are complete. */
    rc = usbProxyFreeBSDDoIoCtl(pProxyDev, USB_FS_COMPLETE, &UsbFsComplete, true);
    if (RT_SUCCESS(rc))
    {
        pXferEndpoint = &pDevFBSD->aHwEndpoint[UsbFsComplete.ep_index];
        pEndpointFBSD = &pDevFBSD->aSwEndpoint[UsbFsComplete.ep_index];

        LogFlow(("usbProxyFreeBSDUrbReap: Reaped URB %#p\n", pEndpointFBSD->pUrb));

        if (pXferEndpoint->status == USB_ERR_CANCELLED)
            goto repeat;

        pUrb = pEndpointFBSD->pUrb;
        pEndpointFBSD->pUrb = NULL;
        if (pUrb == NULL)
            goto repeat;

        switch (pXferEndpoint->status)
        {
            case USB_ERR_NORMAL_COMPLETION:
                pUrb->enmStatus = VUSBSTATUS_OK;
                break;
            case USB_ERR_STALLED:
                pUrb->enmStatus = VUSBSTATUS_STALL;
                break;
            default:
                pUrb->enmStatus = VUSBSTATUS_INVALID;
                break;
        }
        pUrb->Dev.pvPrivate = NULL;

        switch (pUrb->enmType)
        {
            case VUSBXFERTYPE_MSG:
                pUrb->cbData = pEndpointFBSD->acbData[0] + pEndpointFBSD->acbData[1];
                break;
            case VUSBXFERTYPE_ISOC:
            {
                int n;

                if (pUrb->enmDir == VUSBDIRECTION_OUT)
                    break;
                pUrb->cbData = 0;
                for (n = 0; n < (int)pUrb->cIsocPkts; n++)
                {
                    if (n >= (int)pEndpointFBSD->cMaxFrames)
                        break;
                    pUrb->cbData += pEndpointFBSD->acbData[n];
                    pUrb->aIsocPkts[n].cb = pEndpointFBSD->acbData[n];
                }
                for (; n < (int)pUrb->cIsocPkts; n++)
                    pUrb->aIsocPkts[n].cb = 0;
                break;
            }
            default:
                pUrb->cbData = pEndpointFBSD->acbData[0];
                break;
        }

        LogFlow(("usbProxyFreeBSDUrbReap: Status=%d epindex=%u len[0]=%d len[1]=%d\n",
                 (int)pXferEndpoint->status, (unsigned)UsbFsComplete.ep_index,
                 (unsigned)pEndpointFBSD->acbData[0], (unsigned)pEndpointFBSD->acbData[1]));
    }
    else if (cMillies && rc == VERR_RESOURCE_BUSY)
    {
        /* Poll for finished transfers. */
        PollFd.fd = RTFileToNative(pDevFBSD->hFile);
        PollFd.events = POLLIN | POLLRDNORM;
        PollFd.revents = 0;

        rc = poll(&PollFd, 1, (cMillies == RT_INDEFINITE_WAIT) ? INFTIM : cMillies);
        if (rc >= 1)
            goto repeat;
        LogFlow(("usbProxyFreeBSDUrbReap: poll returned rc=%d\n", rc));
    }
    return pUrb;
}
/**
 * Interface that the PDM helper methods for asynchronous notification
 * completion use to wake up EMT0 when it is waiting inside
 * VMR3AsyncPdmNotificationWaitU().
 *
 * @param   pUVM    Pointer to the user mode VM structure.
 */
VMMR3_INT_DECL(void) VMR3AsyncPdmNotificationWakeupU(PUVM pUVM)
{
    LogFlow(("VMR3AsyncPdmNotificationWakeupU:\n"));
    VM_FF_SET(pUVM->pVM, VM_FF_REQUEST); /* this will have to do for now. */
    g_aHaltMethods[pUVM->vm.s.iHaltMethod].pfnNotifyCpuFF(&pUVM->aCpus[0], 0 /*fFlags*/);
}
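/*
 * A minimal sketch of how the wait and wakeup functions above pair up,
 * assuming EMT0 blocks for an asynchronous PDM notification while another
 * thread completes it. The sketch function names and the pfDone flag are
 * illustrative, not part of the VMM API.
 */
static int pdmNotificationWaitSketch(PUVMCPU pUVCpu, volatile bool *pfDone)
{
    /* EMT0: block until the completion side wakes us (or another FF fires). */
    while (!*pfDone)
    {
        int rc = VMR3AsyncPdmNotificationWaitU(pUVCpu);
        if (RT_FAILURE(rc))
            return rc;
    }
    return VINF_SUCCESS;
}

/* Completion thread: mark the work done, then nudge EMT0 out of its halt. */
static void pdmNotificationCompleteSketch(PUVM pUVM, volatile bool *pfDone)
{
    ASMAtomicWriteBool(pfDone, true);
    VMR3AsyncPdmNotificationWakeupU(pUVM);
}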
/**
 * \#PF Virtual Handler callback for Guest write access to the Guest's own GDT.
 *
 * @returns VBox status code (appropriate for trap handling and GC return).
 * @param   pVM         VM Handle.
 * @param   uErrorCode  CPU Error code.
 * @param   pRegFrame   Trap register frame.
 * @param   pvFault     The fault address (cr2).
 * @param   pvRange     The base address of the handled virtual range.
 * @param   offRange    The offset of the access into this range.
 *                      (If it's an EIP range this is the EIP, if not it's pvFault.)
 */
VMMRCDECL(int) selmRCGuestGDTWriteHandler(PVM pVM, RTGCUINT uErrorCode, PCPUMCTXCORE pRegFrame, RTGCPTR pvFault,
                                          RTGCPTR pvRange, uintptr_t offRange)
{
    PVMCPU pVCpu = VMMGetCpu0(pVM);
    LogFlow(("selmRCGuestGDTWriteHandler errcode=%x fault=%RGv offRange=%08x\n", (uint32_t)uErrorCode, pvFault, offRange));

    /*
     * First check if this is the LDT entry.
     * LDT updates are problematic since an invalid LDT entry will cause trouble during worldswitch.
     */
    int rc;
    if (CPUMGetGuestLDTR(pVCpu) / sizeof(X86DESC) == offRange / sizeof(X86DESC))
    {
        Log(("LDTR selector change -> fall back to HC!!\n"));
        rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        /** @todo We're not handling changes to the selectors in LDTR and TR correctly at all.
         * We should ignore any changes to those and sync them only when they are loaded by the guest! */
    }
    else
    {
        /*
         * Attempt to emulate the instruction and sync the affected entries.
         */
        /** @todo should check if any affected selectors are loaded. */
        uint32_t cb;
        rc = EMInterpretInstruction(pVM, pVCpu, pRegFrame, (RTGCPTR)(RTRCUINTPTR)pvFault, &cb);
        if (RT_SUCCESS(rc) && cb)
        {
            unsigned iGDTE1 = offRange / sizeof(X86DESC);
            int rc2 = selmGCSyncGDTEntry(pVM, pRegFrame, iGDTE1);
            if (rc2 == VINF_SUCCESS)
            {
                Assert(cb);
                unsigned iGDTE2 = (offRange + cb - 1) / sizeof(X86DESC);
                if (iGDTE1 != iGDTE2)
                    rc2 = selmGCSyncGDTEntry(pVM, pRegFrame, iGDTE2);
                if (rc2 == VINF_SUCCESS)
                {
                    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTHandled);
                    return rc;
                }
            }
            if (rc == VINF_SUCCESS || RT_FAILURE(rc2))
                rc = rc2;
        }
        else
        {
            Assert(RT_FAILURE(rc));
            if (rc == VERR_EM_INTERPRETER)
                rc = VINF_EM_RAW_EMULATE_INSTR_GDT_FAULT;
        }
    }
    if (    rc != VINF_EM_RAW_EMULATE_INSTR_LDT_FAULT
        &&  rc != VINF_EM_RAW_EMULATE_INSTR_TSS_FAULT)
    {
        /* Not necessary when we need to go back to the host context to sync the LDT or TSS. */
        VMCPU_FF_SET(pVCpu, VMCPU_FF_SELM_SYNC_GDT);
    }
    STAM_COUNTER_INC(&pVM->selm.s.StatRCWriteGuestGDTUnhandled);
    return rc;
}
/**
 * Create a new thread.
 *
 * @returns iprt status code.
 * @param   pThread     Where to store the thread handle to the new thread. (optional)
 * @param   pfnThread   The thread function.
 * @param   pvUser      User argument.
 * @param   cbStack     The size of the stack for the new thread.
 *                      Use 0 for the default stack size.
 * @param   enmType     The thread type. Used for deciding scheduling attributes
 *                      of the thread.
 * @param   fFlags      Flags of the RTTHREADFLAGS type (ORed together).
 * @param   pszName     Thread name.
 */
RTDECL(int) RTThreadCreate(PRTTHREAD pThread, PFNRTTHREAD pfnThread, void *pvUser, size_t cbStack,
                           RTTHREADTYPE enmType, unsigned fFlags, const char *pszName)
{
    int             rc;
    PRTTHREADINT    pThreadInt;

    LogFlow(("RTThreadCreate: pThread=%p pfnThread=%p pvUser=%p cbStack=%#x enmType=%d fFlags=%#x pszName=%p:{%s}\n",
             pThread, pfnThread, pvUser, cbStack, enmType, fFlags, pszName, pszName));

    /*
     * Validate input.
     */
    if (!VALID_PTR(pThread) && pThread)
    {
        Assert(VALID_PTR(pThread));
        return VERR_INVALID_PARAMETER;
    }
    if (!VALID_PTR(pfnThread))
    {
        Assert(VALID_PTR(pfnThread));
        return VERR_INVALID_PARAMETER;
    }
    if (!pszName || !*pszName || strlen(pszName) >= RTTHREAD_NAME_LEN)
    {
        AssertMsgFailed(("pszName=%s (max len is %d because of logging)\n", pszName, RTTHREAD_NAME_LEN - 1));
        return VERR_INVALID_PARAMETER;
    }
    if (fFlags & ~RTTHREADFLAGS_MASK)
    {
        AssertMsgFailed(("fFlags=%#x\n", fFlags));
        return VERR_INVALID_PARAMETER;
    }

    /*
     * Allocate thread argument.
     */
    pThreadInt = rtThreadAlloc(enmType, fFlags, 0, pszName);
    if (pThreadInt)
    {
        RTNATIVETHREAD NativeThread;

        pThreadInt->pfnThread = pfnThread;
        pThreadInt->pvUser    = pvUser;
        pThreadInt->cbStack   = cbStack;

        rc = rtThreadNativeCreate(pThreadInt, &NativeThread);
        if (RT_SUCCESS(rc))
        {
            rtThreadInsert(pThreadInt, NativeThread);
            rtThreadRelease(pThreadInt);
            Log(("RTThreadCreate: Created thread %p (%p) %s\n", pThreadInt, NativeThread, pszName));
            if (pThread)
                *pThread = pThreadInt;
            return VINF_SUCCESS;
        }

        pThreadInt->cRefs = 1;
        rtThreadRelease(pThreadInt);
    }
    else
        rc = VERR_NO_TMP_MEMORY;
    LogFlow(("RTThreadCreate: Failed to create thread, rc=%Rrc\n", rc));
    AssertReleaseRC(rc);
    return rc;
}
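/*
 * A minimal usage sketch for RTThreadCreate, assuming the public IPRT
 * headers; the thread function and its name are illustrative.
 */
#include <iprt/thread.h>

static DECLCALLBACK(int) workerThreadSketch(RTTHREAD hThreadSelf, void *pvUser)
{
    NOREF(hThreadSelf);
    NOREF(pvUser);
    /* ... do the actual work here ... */
    return VINF_SUCCESS;
}

static int startWorkerSketch(void)
{
    RTTHREAD hThread;
    int rc = RTThreadCreate(&hThread, workerThreadSketch, NULL /*pvUser*/, 0 /*cbStack*/,
                            RTTHREADTYPE_DEFAULT, RTTHREADFLAGS_WAITABLE, "worker");
    if (RT_SUCCESS(rc))
    {
        /* Wait for the thread to finish and pick up its status code. */
        int rcThread;
        rc = RTThreadWait(hThread, RT_INDEFINITE_WAIT, &rcThread);
        if (RT_SUCCESS(rc))
            rc = rcThread;
    }
    return rc;
}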
/**
 * Executes hardware accelerated raw code. (Intel VT-x & AMD-V)
 *
 * This function contains the raw-mode version of the inner
 * execution loop (the outer loop being in EMR3ExecuteVM()).
 *
 * @returns VBox status code. The most important ones are: VINF_EM_RESCHEDULE, VINF_EM_RESCHEDULE_RAW,
 *          VINF_EM_RESCHEDULE_REM, VINF_EM_SUSPEND, VINF_EM_RESET and VINF_EM_TERMINATE.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context virtual CPU structure.
 * @param   pfFFDone    Where to store an indicator telling whether or not
 *                      FFs were done before returning.
 */
int emR3HmExecute(PVM pVM, PVMCPU pVCpu, bool *pfFFDone)
{
    int      rc   = VERR_IPE_UNINITIALIZED_STATUS;
    PCPUMCTX pCtx = pVCpu->em.s.pCtx;

    LogFlow(("emR3HmExecute%d: (cs:eip=%04x:%RGv)\n", pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip));
    *pfFFDone = false;

    STAM_COUNTER_INC(&pVCpu->em.s.StatHmExecuteEntry);

#ifdef EM_NOTIFY_HM
    HMR3NotifyScheduled(pVCpu);
#endif

    /*
     * Spin till we get a forced action which returns anything but VINF_SUCCESS.
     */
    for (;;)
    {
        STAM_PROFILE_ADV_START(&pVCpu->em.s.StatHmEntry, a);

        /* Check if a forced reschedule is pending. */
        if (HMR3IsRescheduleRequired(pVM, pCtx))
        {
            rc = VINF_EM_RESCHEDULE;
            break;
        }

        /*
         * Process high priority pre-execution raw-mode FFs.
         */
#ifdef VBOX_WITH_RAW_MODE
        Assert(!VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_SELM_SYNC_TSS | VMCPU_FF_SELM_SYNC_GDT | VMCPU_FF_SELM_SYNC_LDT));
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_PRE_RAW_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_PRE_RAW_MASK))
        {
            rc = emR3HmForcedActions(pVM, pVCpu, pCtx);
            if (rc != VINF_SUCCESS)
                break;
        }

#ifdef LOG_ENABLED
        /*
         * Log important stuff before entering GC.
         */
        if (TRPMHasTrap(pVCpu))
            Log(("CPU%d: Pending hardware interrupt=0x%x cs:rip=%04X:%RGv\n",
                 pVCpu->idCpu, TRPMGetTrapNo(pVCpu), pCtx->cs.Sel, (RTGCPTR)pCtx->rip));

        uint32_t cpl = CPUMGetGuestCPL(pVCpu);
        if (pVM->cCpus == 1)
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86: %08X IF=%d\n", pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL,
                     (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL,
                     (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
        else
        {
            if (pCtx->eflags.Bits.u1VM)
                Log(("HWV86-CPU%d: %08X IF=%d\n", pVCpu->idCpu, pCtx->eip, pCtx->eflags.Bits.u1IF));
            else if (CPUMIsGuestIn64BitCodeEx(pCtx))
                Log(("HWR%d-CPU%d: %04X:%RGv ESP=%RGv IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pVCpu->idCpu, pCtx->cs.Sel, (RTGCPTR)pCtx->rip, pCtx->rsp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL,
                     (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
            else
                Log(("HWR%d-CPU%d: %04X:%08X ESP=%08X IF=%d IOPL=%d CR0=%x CR4=%x EFER=%x\n",
                     cpl, pVCpu->idCpu, pCtx->cs.Sel, pCtx->eip, pCtx->esp, pCtx->eflags.Bits.u1IF, pCtx->eflags.Bits.u2IOPL,
                     (uint32_t)pCtx->cr0, (uint32_t)pCtx->cr4, (uint32_t)pCtx->msrEFER));
        }
#endif /* LOG_ENABLED */

        /*
         * Execute the code.
         */
        STAM_PROFILE_ADV_STOP(&pVCpu->em.s.StatHmEntry, a);

        if (RT_LIKELY(emR3IsExecutionAllowed(pVM, pVCpu)))
        {
            STAM_PROFILE_START(&pVCpu->em.s.StatHmExec, x);
            rc = VMMR3HmRunGC(pVM, pVCpu);
            STAM_PROFILE_STOP(&pVCpu->em.s.StatHmExec, x);
        }
        else
        {
            /* Give up this time slice; virtual time continues. */
            STAM_REL_PROFILE_ADV_START(&pVCpu->em.s.StatCapped, u);
            RTThreadSleep(5);
            STAM_REL_PROFILE_ADV_STOP(&pVCpu->em.s.StatCapped, u);
            rc = VINF_SUCCESS;
        }

        /*
         * Deal with high priority post execution FFs before doing anything else.
         */
        VMCPU_FF_CLEAR(pVCpu, VMCPU_FF_RESUME_GUEST_MASK);
        if (   VM_FF_IS_PENDING(pVM, VM_FF_HIGH_PRIORITY_POST_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_HIGH_PRIORITY_POST_MASK))
            rc = emR3HighPriorityPostForcedActions(pVM, pVCpu, rc);

        /*
         * Process the returned status code.
         */
        if (rc >= VINF_EM_FIRST && rc <= VINF_EM_LAST)
            break;

        rc = emR3HmHandleRC(pVM, pVCpu, pCtx, rc);
        if (rc != VINF_SUCCESS)
            break;

        /*
         * Check and execute forced actions.
         */
#ifdef VBOX_HIGH_RES_TIMERS_HACK
        TMTimerPollVoid(pVM, pVCpu);
#endif
        if (   VM_FF_IS_PENDING(pVM, VM_FF_ALL_MASK)
            || VMCPU_FF_IS_PENDING(pVCpu, VMCPU_FF_ALL_MASK))
        {
            rc = emR3ForcedActions(pVM, pVCpu, rc);
            VBOXVMM_EM_FF_ALL_RET(pVCpu, rc);
            if (   rc != VINF_SUCCESS
                && rc != VINF_EM_RESCHEDULE_HM)
            {
                *pfFFDone = true;
                break;
            }
        }
    }

    /*
     * Return to outer loop.
     */
#if defined(LOG_ENABLED) && defined(DEBUG)
    RTLogFlush(NULL);
#endif
    return rc;
}
/**
 * Common worker for the debug and normal APIs.
 *
 * @returns VINF_SUCCESS if entered successfully.
 * @returns rcBusy when encountering a busy critical section in GC/R0.
 * @returns VERR_SEM_DESTROYED if the critical section is dead.
 *
 * @param   pCritSect   The PDM critical section to enter.
 * @param   rcBusy      The status code to return when we're in GC or R0
 *                      and the section is busy.
 */
DECL_FORCE_INLINE(int) pdmCritSectEnter(PPDMCRITSECT pCritSect, int rcBusy, PCRTLOCKVALSRCPOS pSrcPos)
{
    Assert(pCritSect->s.Core.cNestings < 8);  /* useful to catch incorrect locking */
    Assert(pCritSect->s.Core.cNestings >= 0);

    /*
     * If the critical section has already been destroyed, then inform the caller.
     */
    AssertMsgReturn(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC,
                    ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic),
                    VERR_SEM_DESTROYED);

    /*
     * See if we're lucky.
     */
    /* NOP ... */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return VINF_SUCCESS;

    RTNATIVETHREAD hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    /* ... not owned ... */
    if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
        return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);

    /* ... or nested. */
    if (pCritSect->s.Core.NativeThreadOwner == hNativeSelf)
    {
        ASMAtomicIncS32(&pCritSect->s.Core.cLockers);
        ASMAtomicIncS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings > 1);
        return VINF_SUCCESS;
    }

    /*
     * Spin for a bit without incrementing the counter.
     */
    /** @todo Move this to cfgm variables since it doesn't make sense to spin on UNI
     *        cpu systems. */
    int32_t cSpinsLeft = CTX_SUFF(PDMCRITSECT_SPIN_COUNT_);
    while (cSpinsLeft-- > 0)
    {
        if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, 0, -1))
            return pdmCritSectEnterFirst(pCritSect, hNativeSelf, pSrcPos);
        ASMNopPause();
        /** @todo Should use monitor/mwait on e.g. &cLockers here, possibly with a
           cli'ed pendingpreemption check up front using sti w/ instruction fusing
           for avoiding races. Hmm ... This is assuming the other party is actually
           executing code on another CPU ... which we could keep track of if we
           wanted. */
    }

#ifdef IN_RING3
    /*
     * Take the slow path.
     */
    return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

#else
# ifdef IN_RING0
    /** @todo If preemption is disabled it means we're in VT-x/AMD-V context
     *        and would be better off switching out of that while waiting for
     *        the lock.  Several of the locks jumps back to ring-3 just to
     *        get the lock, the ring-3 code will then call the kernel to do
     *        the lock wait and when the call return it will call ring-0
     *        again and resume via in setjmp style.  Not very efficient. */
#  if 0
    if (ASMIntAreEnabled()) /** @todo this can be handled as well by changing
                             * callers not prepared for longjmp/blocking to
                             * use PDMCritSectTryEnter. */
    {
        /*
         * Leave HWACCM context while waiting if necessary.
         */
        int rc;
        if (RTThreadPreemptIsEnabled(NIL_RTTHREAD))
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000);
            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
        }
        else
        {
            STAM_REL_COUNTER_ADD(&pCritSect->s.StatContentionRZLock, 1000000000);
            PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
            PVMCPU  pVCpu = VMMGetCpu(pVM);
            HWACCMR0Leave(pVM, pVCpu);
            RTThreadPreemptRestore(NIL_RTTHREAD, ????);

            rc = pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);

            RTThreadPreemptDisable(NIL_RTTHREAD, ????);
            HWACCMR0Enter(pVM, pVCpu);
        }
        return rc;
    }
#  else
    /*
     * Preemption hasn't been disabled, so we can block here in ring-0.
     */
    if (   RTThreadPreemptIsEnabled(NIL_RTTHREAD)
        && ASMIntAreEnabled())
        return pdmR3R0CritSectEnterContended(pCritSect, hNativeSelf, pSrcPos);
#  endif
# endif /* IN_RING0 */

    STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZLock);

    /*
     * Call ring-3 to acquire the critical section?
     */
    if (rcBusy == VINF_SUCCESS)
    {
        PVM     pVM   = pCritSect->s.CTX_SUFF(pVM);
        AssertPtr(pVM);
        PVMCPU  pVCpu = VMMGetCpu(pVM);
        AssertPtr(pVCpu);
        return VMMRZCallRing3(pVM, pVCpu, VMMCALLRING3_PDM_CRIT_SECT_ENTER, MMHyperCCToR3(pVM, pCritSect));
    }

    /*
     * Return busy.
     */
    LogFlow(("PDMCritSectEnter: locked => R3 (%Rrc)\n", rcBusy));
    return rcBusy;
#endif /* !IN_RING3 */
}
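/*
 * A minimal sketch of the canonical enter/leave pattern built on the worker
 * above, as seen from device code using the public PDMCritSectEnter/Leave
 * wrappers; the function name and pCritSect origin are illustrative.
 */
static int deviceTouchStateSketch(PPDMCRITSECT pCritSect)
{
    /* In ring-3 the rcBusy argument is not used; VERR_SEM_BUSY only matters
       for R0/RC callers that cannot block. */
    int rc = PDMCritSectEnter(pCritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... touch the shared device state here ... */
        PDMCritSectLeave(pCritSect);
    }
    return rc;
}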
/**
 * Checks I/O access for guest or hypervisor breakpoints.
 *
 * @returns Strict VBox status code.
 * @retval  VINF_SUCCESS no breakpoint.
 * @retval  VINF_EM_DBG_BREAKPOINT hypervisor breakpoint triggered.
 * @retval  VINF_EM_RAW_GUEST_TRAP guest breakpoint triggered, DR6 and DR7 have
 *          been updated appropriately.
 *
 * @param   pVM         The cross context VM structure.
 * @param   pVCpu       The cross context CPU structure for the calling EMT.
 * @param   pCtx        The CPU context for the calling EMT.
 * @param   uIoPort     The I/O port being accessed.
 * @param   cbValue     The size/width of the access, in bytes.
 */
VMM_INT_DECL(VBOXSTRICTRC) DBGFBpCheckIo(PVM pVM, PVMCPU pVCpu, PCPUMCTX pCtx, RTIOPORT uIoPort, uint8_t cbValue)
{
    uint32_t const uIoPortFirst = uIoPort;
    uint32_t const uIoPortLast  = uIoPortFirst + cbValue - 1;

    /*
     * Check hyper breakpoints first as the VMM debugger has priority over
     * the guest.
     */
    for (unsigned iBp = 0; iBp < RT_ELEMENTS(pVM->dbgf.s.aHwBreakpoints); iBp++)
    {
        if (   pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.fType == X86_DR7_RW_IO
            && pVM->dbgf.s.aHwBreakpoints[iBp].fEnabled
            && pVM->dbgf.s.aHwBreakpoints[iBp].enmType     == DBGFBPTYPE_REG )
        {
            uint8_t  cbReg     = pVM->dbgf.s.aHwBreakpoints[iBp].u.Reg.cb;
            Assert(RT_IS_POWER_OF_TWO(cbReg));
            uint64_t uDrXFirst = pVM->dbgf.s.aHwBreakpoints[iBp].GCPtr & ~(uint64_t)(cbReg - 1);
            uint64_t uDrXLast  = uDrXFirst + cbReg - 1;
            if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
            {
                /* (See also DBGFRZTrap01Handler.) */
                pVCpu->dbgf.s.iActiveBp = pVM->dbgf.s.aHwBreakpoints[iBp].iBp;
                pVCpu->dbgf.s.fSingleSteppingRaw = false;

                LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
                         pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
                return VINF_EM_DBG_BREAKPOINT;
            }
        }
    }

    /*
     * Check the guest.
     */
    uint32_t const uDr7 = pCtx->dr[7];
    if (   (uDr7 & X86_DR7_ENABLED_MASK)
        && X86_DR7_ANY_RW_IO(uDr7)
        && (pCtx->cr4 & X86_CR4_DE) )
    {
        for (unsigned iBp = 0; iBp < 4; iBp++)
        {
            if (   (uDr7 & X86_DR7_L_G(iBp))
                && X86_DR7_GET_RW(uDr7, iBp) == X86_DR7_RW_IO)
            {
                /* ASSUME the breakpoint and the I/O width qualifier use the same encoding (1 2 x 4). */
                static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 };
                uint8_t  cbInvAlign = s_abInvAlign[X86_DR7_GET_LEN(uDr7, iBp)];
                uint64_t uDrXFirst  = pCtx->dr[iBp] & ~(uint64_t)cbInvAlign;
                uint64_t uDrXLast   = uDrXFirst + cbInvAlign;

                if (uDrXFirst <= uIoPortLast && uDrXLast >= uIoPortFirst)
                {
                    /*
                     * Update DR6 and DR7.
                     *
                     * See "AMD64 Architecture Programmer's Manual Volume 2",
                     * chapter 13.1.1.3 for details on DR6 bits. The basic idea
                     * is that the B0..B3 bits are always cleared while the
                     * others must be cleared by software.
                     *
                     * The following sub chapters say the GD bit is always
                     * cleared when generating a #DB so the handler can safely
                     * access the debug registers.
                     */
                    pCtx->dr[6] &= ~X86_DR6_B_MASK;
                    pCtx->dr[6] |= X86_DR6_B(iBp);
                    pCtx->dr[7] &= ~X86_DR7_GD;
                    LogFlow(("DBGFBpCheckIo: hit hw breakpoint %d at %04x:%RGv (iop %#x)\n",
                             pVM->dbgf.s.aHwBreakpoints[iBp].iBp, pCtx->cs.Sel, pCtx->rip, uIoPort));
                    return VINF_EM_RAW_GUEST_TRAP;
                }
            }
        }
    }
    return VINF_SUCCESS;
}
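/*
 * A worked example of the DR7 length decoding used above, assuming the LEN
 * field encoding 00b=1, 01b=2, 10b=8 (long mode only), 11b=4 bytes. The
 * table turns the field into a size-minus-one alignment mask so the watched
 * range can be overlap-tested against the accessed I/O port range; the
 * function name is illustrative.
 */
static bool dbgfBpIoOverlapSketch(uint64_t uDrX, uint8_t uLenField, uint32_t uIoPortFirst, uint32_t uIoPortLast)
{
    static uint8_t const s_abInvAlign[4] = { 0, 1, 7, 3 }; /* LEN -> size-1 mask */
    uint8_t  cbInvAlign = s_abInvAlign[uLenField & 3];
    uint64_t uFirst     = uDrX & ~(uint64_t)cbInvAlign;    /* aligned start */
    uint64_t uLast      = uFirst + cbInvAlign;             /* inclusive end */
    return uFirst <= uIoPortLast && uLast >= uIoPortFirst;
}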
/**
 * Leaves a critical section entered with PDMCritSectEnter().
 *
 * @param   pCritSect   The PDM critical section to leave.
 */
VMMDECL(void) PDMCritSectLeave(PPDMCRITSECT pCritSect)
{
    AssertMsg(pCritSect->s.Core.u32Magic == RTCRITSECT_MAGIC, ("%p %RX32\n", pCritSect, pCritSect->s.Core.u32Magic));

    /* Check for NOP sections before asserting ownership. */
    if (pCritSect->s.Core.fFlags & RTCRITSECT_FLAGS_NOP)
        return;

    /*
     * Always check that the caller is the owner (screw performance).
     */
    RTNATIVETHREAD const hNativeSelf = pdmCritSectGetNativeSelf(pCritSect);
    AssertReleaseMsgReturnVoid(pCritSect->s.Core.NativeThreadOwner == hNativeSelf,
                               ("%p %s: %p != %p; cLockers=%d cNestings=%d\n",
                                pCritSect, R3STRING(pCritSect->s.pszName),
                                pCritSect->s.Core.NativeThreadOwner, hNativeSelf,
                                pCritSect->s.Core.cLockers, pCritSect->s.Core.cNestings));
    Assert(pCritSect->s.Core.cNestings >= 1);

    /*
     * Nested leave.
     */
    if (pCritSect->s.Core.cNestings > 1)
    {
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings >= 1);
        ASMAtomicDecS32(&pCritSect->s.Core.cLockers);
        Assert(pCritSect->s.Core.cLockers >= 0);
        return;
    }

#ifdef IN_RING0
# if 0 /** @todo Make SUPSemEventSignal interrupt safe (handle table++) and enable this for: defined(RT_OS_LINUX) || defined(RT_OS_OS2) */
    if (1) /* SUPSemEventSignal is safe */
# else
    if (ASMIntAreEnabled())
# endif
#endif
#if defined(IN_RING3) || defined(IN_RING0)
    {
        /*
         * Leave for real.
         */
        /* update members. */
# ifdef IN_RING3
        RTSEMEVENT hEventToSignal = pCritSect->s.EventToSignal;
        pCritSect->s.EventToSignal = NIL_RTSEMEVENT;
#  if defined(PDMCRITSECT_STRICT)
        if (pCritSect->s.Core.pValidatorRec->hThread != NIL_RTTHREAD)
            RTLockValidatorRecExclReleaseOwnerUnchecked(pCritSect->s.Core.pValidatorRec);
#  endif
        Assert(!pCritSect->s.Core.pValidatorRec || pCritSect->s.Core.pValidatorRec->hThread == NIL_RTTHREAD);
# endif
        ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
        ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
        ASMAtomicDecS32(&pCritSect->s.Core.cNestings);
        Assert(pCritSect->s.Core.cNestings == 0);

        /* stop and decrement lockers. */
        STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);
        ASMCompilerBarrier();
        if (ASMAtomicDecS32(&pCritSect->s.Core.cLockers) >= 0)
        {
            /* Someone is waiting, wake up one of them. */
            SUPSEMEVENT     hEvent   = (SUPSEMEVENT)pCritSect->s.Core.EventSem;
            PSUPDRVSESSION  pSession = pCritSect->s.CTX_SUFF(pVM)->pSession;
            int rc = SUPSemEventSignal(pSession, hEvent);
            AssertRC(rc);
        }

# ifdef IN_RING3
        /* Signal exit event. */
        if (hEventToSignal != NIL_RTSEMEVENT)
        {
            LogBird(("Signalling %#x\n", hEventToSignal));
            int rc = RTSemEventSignal(hEventToSignal);
            AssertRC(rc);
        }
# endif

# if defined(DEBUG_bird) && defined(IN_RING0)
        VMMTrashVolatileXMMRegs();
# endif
    }
#endif /* IN_RING3 || IN_RING0 */

#ifdef IN_RING0
    else
#endif
#if defined(IN_RING0) || defined(IN_RC)
    {
        /*
         * Try leave it.
         */
        if (pCritSect->s.Core.cLockers == 0)
        {
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 0);
            RTNATIVETHREAD hNativeThread = pCritSect->s.Core.NativeThreadOwner;
            ASMAtomicAndU32(&pCritSect->s.Core.fFlags, ~PDMCRITSECT_FLAGS_PENDING_UNLOCK);
            STAM_PROFILE_ADV_STOP(&pCritSect->s.StatLocked, l);

            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, NIL_RTNATIVETHREAD);
            if (ASMAtomicCmpXchgS32(&pCritSect->s.Core.cLockers, -1, 0))
                return;

            /* darn, someone raced in on us. */
            ASMAtomicWriteHandle(&pCritSect->s.Core.NativeThreadOwner, hNativeThread);
            STAM_PROFILE_ADV_START(&pCritSect->s.StatLocked, l);
            Assert(pCritSect->s.Core.cNestings == 0);
            ASMAtomicWriteS32(&pCritSect->s.Core.cNestings, 1);
        }
        ASMAtomicOrU32(&pCritSect->s.Core.fFlags, PDMCRITSECT_FLAGS_PENDING_UNLOCK);

        /*
         * Queue the request.
         */
        PVM         pVM   = pCritSect->s.CTX_SUFF(pVM);     AssertPtr(pVM);
        PVMCPU      pVCpu = VMMGetCpu(pVM);                 AssertPtr(pVCpu);
        uint32_t    i     = pVCpu->pdm.s.cQueuedCritSectLeaves++;
        LogFlow(("PDMCritSectLeave: [%d]=%p => R3\n", i, pCritSect));
        AssertFatal(i < RT_ELEMENTS(pVCpu->pdm.s.apQueuedCritSectsLeaves));
        pVCpu->pdm.s.apQueuedCritSectsLeaves[i] = MMHyperCCToR3(pVM, pCritSect);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_PDM_CRITSECT);
        VMCPU_FF_SET(pVCpu, VMCPU_FF_TO_R3);
        STAM_REL_COUNTER_INC(&pVM->pdm.s.StatQueuedCritSectLeaves);
        STAM_REL_COUNTER_INC(&pCritSect->s.StatContentionRZUnlock);
    }
#endif /* IN_RING0 || IN_RC */
}
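A hedged usage sketch of the enter/leave pairing (pThis and its CritSect member are illustrative device state): PDMCritSectEnter() takes a busy status code to return when the section is contended in RC/R0, and every successful enter must be balanced by a PDMCritSectLeave().

    int rc = PDMCritSectEnter(&pThis->CritSect, VERR_SEM_BUSY);
    if (rc == VINF_SUCCESS)
    {
        /* ... access the state guarded by the section ... */
        PDMCritSectLeave(&pThis->CritSect);
    }
    /* else: rc is the busy status passed in above, meaning the access
       should be retried from ring-3 (the RC/R0 contention case). */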
/**
 * @interface_method_impl{USBPROXYBACK,pfnUrbQueue}
 */
static DECLCALLBACK(int) usbProxyFreeBSDUrbQueue(PUSBPROXYDEV pProxyDev, PVUSBURB pUrb)
{
    PUSBPROXYDEVFBSD pDevFBSD = USBPROXYDEV_2_DATA(pProxyDev, PUSBPROXYDEVFBSD);
    PUSBENDPOINTFBSD pEndpointFBSD;
    struct usb_fs_endpoint *pXferEndpoint;
    struct usb_fs_start UsbFsStart;
    unsigned cFrames;
    uint8_t *pbData;
    int index;
    int ep_num;
    int rc;

    LogFlow(("usbProxyFreeBSDUrbQueue: pUrb=%p EndPt=%u Dir=%u\n",
             pUrb, (unsigned)pUrb->EndPt, (unsigned)pUrb->enmDir));

    ep_num = pUrb->EndPt;
    if ((pUrb->enmType != VUSBXFERTYPE_MSG) && (pUrb->enmDir == VUSBDIRECTION_IN))
    {
        /* set IN-direction bit */
        ep_num |= 0x80;
    }

    index = 0;

retry:

    index = usbProxyFreeBSDEndpointOpen(pProxyDev, ep_num,
                                        (pUrb->enmType == VUSBXFERTYPE_ISOC),
                                        index);
    if (index < 0)
        return VERR_INVALID_PARAMETER;

    pEndpointFBSD = &pDevFBSD->aSwEndpoint[index];
    pXferEndpoint = &pDevFBSD->aHwEndpoint[index];

    pbData = pUrb->abData;

    switch (pUrb->enmType)
    {
        case VUSBXFERTYPE_MSG:
        {
            pEndpointFBSD->apvData[0] = pbData;
            pEndpointFBSD->acbData[0] = 8;

            /* check wLength */
            if (pbData[6] || pbData[7])
            {
                pEndpointFBSD->apvData[1] = pbData + 8;
                pEndpointFBSD->acbData[1] = pbData[6] | (pbData[7] << 8);
                cFrames = 2;
            }
            else
            {
                pEndpointFBSD->apvData[1] = NULL;
                pEndpointFBSD->acbData[1] = 0;
                cFrames = 1;
            }

            LogFlow(("usbProxyFreeBSDUrbQueue: pUrb->cbData=%u, 0x%02x, "
                     "0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x, 0x%02x\n",
                     pUrb->cbData, pbData[0], pbData[1], pbData[2], pbData[3],
                     pbData[4], pbData[5], pbData[6], pbData[7]));

            pXferEndpoint->timeout = USB_FS_TIMEOUT_NONE;
            pXferEndpoint->flags   = USB_FS_FLAG_MULTI_SHORT_OK;
            break;
        }
        case VUSBXFERTYPE_ISOC:
        {
            unsigned i;

            for (i = 0; i < pUrb->cIsocPkts; i++)
            {
                if (i >= pEndpointFBSD->cMaxFrames)
                    break;
                pEndpointFBSD->apvData[i] = pbData + pUrb->aIsocPkts[i].off;
                pEndpointFBSD->acbData[i] = pUrb->aIsocPkts[i].cb;
            }
            /* Timeout handling will be done during reap. */
            pXferEndpoint->timeout = USB_FS_TIMEOUT_NONE;
            pXferEndpoint->flags   = USB_FS_FLAG_MULTI_SHORT_OK;
            cFrames = i;
            break;
        }
        default:
        {
            pEndpointFBSD->apvData[0] = pbData;
            pEndpointFBSD->cbData0    = pUrb->cbData;

            /* XXX maybe we have to loop */
            if (pUrb->cbData > pEndpointFBSD->cMaxIo)
                pEndpointFBSD->acbData[0] = pEndpointFBSD->cMaxIo;
            else
                pEndpointFBSD->acbData[0] = pUrb->cbData;

            /* Timeout handling will be done during reap. */
            pXferEndpoint->timeout = USB_FS_TIMEOUT_NONE;
            pXferEndpoint->flags   = pUrb->fShortNotOk ? 0 : USB_FS_FLAG_MULTI_SHORT_OK;
            cFrames = 1;
            break;
        }
    }

    /* store number of frames */
    pXferEndpoint->nFrames = cFrames;

    /* zero-default */
    memset(&UsbFsStart, 0, sizeof(UsbFsStart));

    /* Start the transfer */
    UsbFsStart.ep_index = index;

    rc = usbProxyFreeBSDDoIoCtl(pProxyDev, USB_FS_START, &UsbFsStart, true);

    LogFlow(("usbProxyFreeBSDUrbQueue: USB_FS_START returned rc=%d "
             "len[0]=%u len[1]=%u cbData=%u index=%u ep_num=%u\n", rc,
             (unsigned)pEndpointFBSD->acbData[0],
             (unsigned)pEndpointFBSD->acbData[1],
             (unsigned)pUrb->cbData,
             (unsigned)index, (unsigned)ep_num));

    if (RT_FAILURE(rc))
    {
        if (rc == VERR_RESOURCE_BUSY)
        {
            index++;
            goto retry;
        }
        return rc;
    }
    pUrb->Dev.pvPrivate = (void *)(long)(index + 1);
    pEndpointFBSD->pUrb = pUrb;

    return rc;
}
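A sketch of the matching decode on the consumer side. This is an assumption about how the reap path would recover the endpoint, mirroring the "index + 1" encoding stored just above; only the pvPrivate encoding and the aHwEndpoint array come from the code here.

    /* Recover the endpoint index stored by usbProxyFreeBSDUrbQueue(); the +1
       bias lets a NULL pvPrivate mean "no transfer queued". */
    int index = (int)(long)pUrb->Dev.pvPrivate - 1;
    if (index >= 0)
    {
        struct usb_fs_endpoint *pXferEndpoint = &pDevFBSD->aHwEndpoint[index];
        /* ... inspect the completed transfer via pXferEndpoint here ... */
    }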
static uint32_t parallel_ioport_read(void *opaque, uint32_t addr, int *pRC)
{
    ParallelState *s = (ParallelState *)opaque;
    uint32_t ret = ~0U;

    *pRC = VINF_SUCCESS;

    addr &= 7;
    switch (addr)
    {
        default:
        case 0:
            if (!(s->reg_control & LPT_CONTROL_ENABLE_BIDIRECT))
                ret = s->reg_data;
            else
            {
#ifndef IN_RING3
                *pRC = VINF_IOM_HC_IOPORT_READ;
#else
                if (RT_LIKELY(s->pDrvHostParallelConnector))
                {
                    size_t cbRead;
                    int rc = s->pDrvHostParallelConnector->pfnRead(s->pDrvHostParallelConnector,
                                                                   &s->reg_data, &cbRead);
                    Log(("parallel_ioport_read: read 0x%X\n", s->reg_data));
                    AssertRC(rc);
                }
                ret = s->reg_data;
#endif
            }
            break;
        case 1:
#ifndef IN_RING3
            *pRC = VINF_IOM_HC_IOPORT_READ;
#else
            if (RT_LIKELY(s->pDrvHostParallelConnector))
            {
                int rc = s->pDrvHostParallelConnector->pfnReadStatus(s->pDrvHostParallelConnector,
                                                                     &s->reg_status);
                AssertRC(rc);
            }
            ret = s->reg_status;
            parallel_clear_irq(s);
#endif
            break;
        case 2:
            ret = s->reg_control;
            break;
        case 3:
            ret = s->reg_epp_addr;
            break;
        case 4:
            ret = s->reg_epp_data;
            break;
        case 5:
        case 6:
        case 7:
            break;
    }
    LogFlow(("parallel: read addr=0x%02x val=0x%02x\n", addr, ret));
    return ret;
}
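Hypothetical wiring of this helper into an IOM port-in callback (the callback name and its registration are illustrative; the shape follows the FNIOMIOPORTIN callbacks of this vintage): the helper reports a ring-3 deferral through its status parameter, and the callback must hand that status back to IOM unchanged so the access is retried in ring-3.

static DECLCALLBACK(int) parallelIOPortRead(PPDMDEVINS pDevIns, void *pvUser,
                                            RTIOPORT Port, uint32_t *pu32, unsigned cb)
{
    int rc;
    NOREF(pDevIns);
    if (cb != 1)
        return VERR_IOM_IOPORT_UNUSED; /* only byte-wide accesses are emulated */
    *pu32 = parallel_ioport_read(pvUser, Port, &rc);
    return rc; /* VINF_SUCCESS, or VINF_IOM_HC_IOPORT_READ to defer to ring-3 */
}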
/**
 * Checks for a Guest Additions update by comparing the installed version on
 * the guest and the reported host version.
 *
 * @returns VBox status code.
 *
 * @param   u32ClientId         The client ID returned by
 *                              VbglR3InfoSvcConnect().
 * @param   pfUpdate            Receives the boolean flag indicating whether
 *                              an update was found or not.
 * @param   ppszHostVersion     Receives the pointer to the allocated host
 *                              version string.  The returned pointer must be
 *                              freed using VbglR3GuestPropReadValueFree().
 *                              Always set to NULL.
 * @param   ppszGuestVersion    Receives the pointer to the allocated guest
 *                              version string.  The returned pointer must be
 *                              freed using VbglR3GuestPropReadValueFree().
 *                              Always set to NULL.
 */
VBGLR3DECL(int) VbglR3HostVersionCheckForUpdate(uint32_t u32ClientId, bool *pfUpdate,
                                                char **ppszHostVersion, char **ppszGuestVersion)
{
    Assert(u32ClientId > 0);
    AssertPtr(pfUpdate);
    AssertPtr(ppszHostVersion);
    AssertPtr(ppszGuestVersion);

    *ppszHostVersion = NULL;
    *ppszGuestVersion = NULL;

    /* We assume we have an update initially.
       Every block down below is allowed to veto. */
    *pfUpdate = true;

    /* Do we need to do all this stuff? */
    char *pszCheckHostVersion;
    int rc = VbglR3GuestPropReadValueAlloc(u32ClientId, "/VirtualBox/GuestAdd/CheckHostVersion", &pszCheckHostVersion);
    if (RT_FAILURE(rc))
    {
        if (rc == VERR_NOT_FOUND)
            rc = VINF_SUCCESS; /* If the value is absent, do the check by default. */
        else
            LogFlow(("Could not read check host version flag! rc = %Rrc\n", rc));
    }
    else
    {
        /* Only skip the check if the property contains a valid "0". */
        if (!strcmp(pszCheckHostVersion, "0"))
        {
            LogRel(("No host version update check performed (disabled).\n"));
            *pfUpdate = false;
        }
        VbglR3GuestPropReadValueFree(pszCheckHostVersion);
    }

    /* Collect all needed information. */
    /* Make sure we only notify the user once by comparing the host version
     * with the last checked host version (if any). */
    if (RT_SUCCESS(rc) && *pfUpdate)
    {
        /* Look up the host version. */
        rc = VbglR3GuestPropReadValueAlloc(u32ClientId, "/VirtualBox/HostInfo/VBoxVer", ppszHostVersion);
        if (RT_FAILURE(rc))
        {
            LogFlow(("Could not read VBox host version! rc = %Rrc\n", rc));
        }
        else
        {
            LogFlow(("Host version: %s\n", *ppszHostVersion));

            /* Get the last checked host version. */
            char *pszLastCheckedHostVersion;
            rc = VbglR3HostVersionLastCheckedLoad(u32ClientId, &pszLastCheckedHostVersion);
            if (RT_SUCCESS(rc))
            {
                LogFlow(("Last checked host version: %s\n", pszLastCheckedHostVersion));
                if (strcmp(*ppszHostVersion, pszLastCheckedHostVersion) == 0)
                    *pfUpdate = false; /* We already notified for this version, skip. */
                VbglR3GuestPropReadValueFree(pszLastCheckedHostVersion);
            }
            else if (rc == VERR_NOT_FOUND) /* Never wrote a last checked host version before. */
            {
                LogFlow(("Never checked a host version before.\n"));
                rc = VINF_SUCCESS;
            }
        }

        /* Look up the guest version. */
        if (RT_SUCCESS(rc))
        {
            rc = VbglR3GetAdditionsVersion(ppszGuestVersion,
                                           NULL /* Extended version not needed here */,
                                           NULL /* Revision not needed here */);
            if (RT_FAILURE(rc))
                LogFlow(("Could not read VBox guest version! rc = %Rrc\n", rc));
        }
    }

    /* Do the actual version comparison (if needed, see block(s) above). */
    if (RT_SUCCESS(rc) && *pfUpdate)
    {
        if (RTStrVersionCompare(*ppszHostVersion, *ppszGuestVersion) > 0) /* Is the host version greater than the installed Guest Additions version? */
        {
            /* Yay, we have an update! */
            LogRel(("Guest Additions update found! Please upgrade this machine to the latest Guest Additions.\n"));
        }
        else
        {
            /* How sad ... */
            *pfUpdate = false;
        }
    }

    /* Clean up on failure. */
    if (RT_FAILURE(rc))
    {
        if (*ppszHostVersion)
        {
            VbglR3GuestPropReadValueFree(*ppszHostVersion);
            *ppszHostVersion = NULL;
        }
        if (*ppszGuestVersion)
        {
            VbglR3GuestPropReadValueFree(*ppszGuestVersion);
            *ppszGuestVersion = NULL;
        }
    }
    return rc;
}
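A caller sketch (hypothetical service code; u32ClientId is assumed to come from VbglR3InfoSvcConnect()) honouring the contract documented above: both version strings must be released with VbglR3GuestPropReadValueFree().

    bool  fUpdate     = false;
    char *pszHostVer  = NULL;
    char *pszGuestVer = NULL;
    int rc = VbglR3HostVersionCheckForUpdate(u32ClientId, &fUpdate, &pszHostVer, &pszGuestVer);
    if (RT_SUCCESS(rc))
    {
        if (fUpdate)
            LogRel(("Update available: host %s, guest %s\n", pszHostVer, pszGuestVer));
        if (pszHostVer)
            VbglR3GuestPropReadValueFree(pszHostVer);
        if (pszGuestVer)
            VbglR3GuestPropReadValueFree(pszGuestVer);
    }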
void Display::processDisplayData(void *pvVRAM, unsigned uScreenId)
{
    if (uScreenId >= mcMonitors)
    {
        LogRel(("VBoxVideo: Guest display information invalid display index %d!!!\n", uScreenId));
        return;
    }

    /* Get the display information structure. */
    DISPLAYFBINFO *pFBInfo = &maFramebuffers[uScreenId];

    uint8_t *pu8 = (uint8_t *)pvVRAM;
    pu8 += pFBInfo->u32Offset + pFBInfo->u32MaxFramebufferSize; // @todo

    uint8_t *pu8End = pu8 + pFBInfo->u32InformationSize;

    VBOXVIDEOINFOHDR *pHdr;

    for (;;)
    {
        pHdr = (VBOXVIDEOINFOHDR *)pu8;
        pu8 += sizeof(VBOXVIDEOINFOHDR);

        if (pu8 >= pu8End)
        {
            LogRel(("VBoxVideo: Guest display information overflow!!!\n"));
            break;
        }

        if (pHdr->u8Type == VBOX_VIDEO_INFO_TYPE_SCREEN)
        {
            if (pHdr->u16Length != sizeof(VBOXVIDEOINFOSCREEN))
            {
                LogRel(("VBoxVideo: Guest display information %s invalid length %d!!!\n", "SCREEN", pHdr->u16Length));
                break;
            }

            VBOXVIDEOINFOSCREEN *pScreen = (VBOXVIDEOINFOSCREEN *)pu8;

            pFBInfo->xOrigin = pScreen->xOrigin;
            pFBInfo->yOrigin = pScreen->yOrigin;

            pFBInfo->w = pScreen->u16Width;
            pFBInfo->h = pScreen->u16Height;

            LogRelFlow(("VBOX_VIDEO_INFO_TYPE_SCREEN: (%p) %d: at %d,%d, linesize 0x%X, size %dx%d, bpp %d, flags 0x%02X\n",
                        pHdr, uScreenId, pScreen->xOrigin, pScreen->yOrigin,
                        pScreen->u32LineSize, pScreen->u16Width, pScreen->u16Height,
                        pScreen->bitsPerPixel, pScreen->u8Flags));

            if (uScreenId != VBOX_VIDEO_PRIMARY_SCREEN)
            {
                /* Primary screen resize is initiated by the VGA device. */
                if (pFBInfo->fDisabled)
                {
                    pFBInfo->fDisabled = false;
                    fireGuestMonitorChangedEvent(mParent->i_getEventSource(),
                                                 GuestMonitorChangedEventType_Enabled,
                                                 uScreenId,
                                                 pFBInfo->xOrigin, pFBInfo->yOrigin,
                                                 pFBInfo->w, pFBInfo->h);
                }

                i_handleDisplayResize(uScreenId, pScreen->bitsPerPixel,
                                      (uint8_t *)pvVRAM + pFBInfo->u32Offset,
                                      pScreen->u32LineSize,
                                      pScreen->u16Width, pScreen->u16Height,
                                      VBVA_SCREEN_F_ACTIVE);
            }
        }
        else if (pHdr->u8Type == VBOX_VIDEO_INFO_TYPE_END)
        {
            if (pHdr->u16Length != 0)
            {
                LogRel(("VBoxVideo: Guest adapter information %s invalid length %d!!!\n", "END", pHdr->u16Length));
                break;
            }
            break;
        }
        else if (pHdr->u8Type == VBOX_VIDEO_INFO_TYPE_HOST_EVENTS)
        {
            if (pHdr->u16Length != sizeof(VBOXVIDEOINFOHOSTEVENTS))
            {
                LogRel(("VBoxVideo: Guest display information %s invalid length %d!!!\n", "HOST_EVENTS", pHdr->u16Length));
                break;
            }

            VBOXVIDEOINFOHOSTEVENTS *pHostEvents = (VBOXVIDEOINFOHOSTEVENTS *)pu8;

            pFBInfo->pHostEvents = pHostEvents;

            LogFlow(("VBOX_VIDEO_INFO_TYPE_HOSTEVENTS: (%p)\n", pHostEvents));
        }
        else if (pHdr->u8Type == VBOX_VIDEO_INFO_TYPE_LINK)
        {
            if (pHdr->u16Length != sizeof(VBOXVIDEOINFOLINK))
            {
                LogRel(("VBoxVideo: Guest adapter information %s invalid length %d!!!\n", "LINK", pHdr->u16Length));
                break;
            }

            VBOXVIDEOINFOLINK *pLink = (VBOXVIDEOINFOLINK *)pu8;
            pu8 += pLink->i32Offset;
        }
        else
        {
            LogRel(("Guest display information contains unsupported type %d\n", pHdr->u8Type));
        }

        pu8 += pHdr->u16Length;
    }
}
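For reference, an illustrative writer for the record stream parsed above (a sketch, not the guest driver code; it relies only on the u8Type/u16Length header fields that the parser dereferences): each record is a VBOXVIDEOINFOHDR followed by u16Length payload bytes, and the stream is terminated by a zero-length END record.

static uint8_t *writeScreenRecord(uint8_t *pu8, VBOXVIDEOINFOSCREEN const *pScreen)
{
    VBOXVIDEOINFOHDR *pHdr = (VBOXVIDEOINFOHDR *)pu8;
    memset(pHdr, 0, sizeof(*pHdr));                 /* zero any reserved header fields */
    pHdr->u8Type    = VBOX_VIDEO_INFO_TYPE_SCREEN;
    pHdr->u16Length = sizeof(VBOXVIDEOINFOSCREEN);
    memcpy(pu8 + sizeof(VBOXVIDEOINFOHDR), pScreen, pHdr->u16Length);
    return pu8 + sizeof(VBOXVIDEOINFOHDR) + pHdr->u16Length; /* start of next record */
}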