/**
 * Initializes the interpreted execution manager.
 *
 * Sets up the per-VCPU IEM state (self-relative offsets and guest context
 * pointers for all three contexts) and registers the IEM statistics.
 *
 * @returns VBox status code (VINF_SUCCESS; statistics registration failures
 *          are not propagated by STAMR3RegisterF usage here).
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) IEMR3Init(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Negative offsets for navigating from the IEM state back to VM/VMCPU. */
        pVCpu->iem.s.offVM    = -RT_OFFSETOF(VM, aCpus[idCpu].iem.s);
        pVCpu->iem.s.offVMCpu = -RT_OFFSETOF(VMCPU, iem.s);

        /* Guest CPU context pointers for ring-3, ring-0 and raw-mode context. */
        pVCpu->iem.s.pCtxR3   = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->iem.s.pCtxR0   = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
        pVCpu->iem.s.pCtxRC   = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Instructions interpreted",         "/IEM/CPU%u/cInstructions", idCpu);
        /* Fixed typo: "Potential exists" -> "Potential exits" (matches the counter name). */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Potential exits",                  "/IEM/CPU%u/cPotentialExits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",  "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",   "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Informational statuses returned",  "/IEM/CPU%u/cRetInfStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Error statuses returned",          "/IEM/CPU%u/cRetErrStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Approx bytes written",             "/IEM/CPU%u/cbWritten", idCpu);
    }
    return VINF_SUCCESS;
}
/**
 * Checks whether kernel preemption is pending for the current thread by
 * peeking at fields of the per-CPU KPRCB (quantum-end flag and DPC queue
 * depth) whose offsets were resolved elsewhere into the g_*rtNtPb* globals.
 *
 * @returns true if the thread's quantum has ended or a DPC is queued;
 *          false otherwise, and also when the KPRCB offsets were never
 *          resolved (we then cannot tell).
 * @param   hThread     Must be NIL_RTTHREAD -- only the current thread is
 *                      supported.
 */
RTDECL(bool) RTThreadPreemptIsPending(RTTHREAD hThread)
{
    Assert(hThread == NIL_RTTHREAD);
    RT_NOREF1(hThread);

    /*
     * Read the globals and check if they are useful.
     */
    /** @todo Should we check KPRCB.InterruptRequest and KPRCB.DpcInterruptRequested (older kernels). */
    uint32_t const offQuantumEnd    = g_offrtNtPbQuantumEnd;
    uint32_t const cbQuantumEnd     = g_cbrtNtPbQuantumEnd;
    uint32_t const offDpcQueueDepth = g_offrtNtPbDpcQueueDepth;
    if (!offQuantumEnd && !cbQuantumEnd && !offDpcQueueDepth)
        return false; /* No offsets resolved at all -- cannot determine, report not pending. */
    /* QuantumEnd offset and size must be resolved (or unresolved) together. */
    Assert((offQuantumEnd && cbQuantumEnd) || (!offQuantumEnd && !cbQuantumEnd));

    /*
     * Disable interrupts so we won't be messed around.
     */
    bool fPending;
    RTCCUINTREG fSavedFlags = ASMIntDisableFlags();

#ifdef RT_ARCH_X86
    /* 32-bit: the KPCR is reachable via FS; it contains a self pointer. */
    PKPCR    pPcr   = (PKPCR)__readfsdword(RT_OFFSETOF(KPCR,SelfPcr));
    uint8_t *pbPrcb = (uint8_t *)pPcr->Prcb;

#elif defined(RT_ARCH_AMD64)
    /* 64-bit: the KPCR is reachable via GS. */
    /* HACK ALERT! The offset is from windbg/vista64. */
    PKPCR    pPcr   = (PKPCR)__readgsqword(RT_OFFSETOF(KPCR,Self));
    uint8_t *pbPrcb = (uint8_t *)pPcr->CurrentPrcb;

#else
# error "port me"
#endif

    /* Check QuantumEnd.  Field width differs across kernel versions, hence the
       size switch.  NOTE(review): assumes the resolved offset matches the
       running kernel's KPRCB layout -- resolution code is not visible here. */
    if (cbQuantumEnd == 1)
    {
        uint8_t volatile *pbQuantumEnd = (uint8_t volatile *)(pbPrcb + offQuantumEnd);
        fPending = *pbQuantumEnd == TRUE;
    }
    else if (cbQuantumEnd == sizeof(uint32_t))
    {
        uint32_t volatile *pu32QuantumEnd = (uint32_t volatile *)(pbPrcb + offQuantumEnd);
        fPending = *pu32QuantumEnd != 0;
    }
    else
        fPending = false; /* Unexpected field size -- treat as not pending. */

    /* Check DpcQueueDepth (only if quantum-end did not already say pending). */
    if (   !fPending
        && offDpcQueueDepth)
    {
        uint32_t volatile *pu32DpcQueueDepth = (uint32_t volatile *)(pbPrcb + offDpcQueueDepth);
        fPending = *pu32DpcQueueDepth > 0;
    }

    ASMSetFlags(fSavedFlags);
    return fPending;
}
/**
 * Initializes the interpreted execution manager.
 *
 * This must be called after CPUM as we're querying information from CPUM about
 * the guest and host CPUs.
 *
 * @returns VBox status code.
 * @param   pVM     The cross context VM structure.
 */
VMMR3DECL(int) IEMR3Init(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        PVMCPU pVCpu = &pVM->aCpus[idCpu];

        /* Negative offsets for navigating from the IEM state back to VM/VMCPU,
           plus guest context pointers for all three contexts. */
        pVCpu->iem.s.offVM    = -RT_OFFSETOF(VM, aCpus[idCpu].iem.s);
        pVCpu->iem.s.offVMCpu = -RT_OFFSETOF(VMCPU, iem.s);
        pVCpu->iem.s.pCtxR3   = CPUMQueryGuestCtxPtr(pVCpu);
        pVCpu->iem.s.pCtxR0   = VM_R0_ADDR(pVM, pVCpu->iem.s.pCtxR3);
        pVCpu->iem.s.pCtxRC   = VM_RC_ADDR(pVM, pVCpu->iem.s.pCtxR3);

        STAMR3RegisterF(pVM, &pVCpu->iem.s.cInstructions, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Instructions interpreted",         "/IEM/CPU%u/cInstructions", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPotentialExits, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Potential exits",                  "/IEM/CPU%u/cPotentialExits", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetAspectNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_ASPECT_NOT_IMPLEMENTED",  "/IEM/CPU%u/cRetAspectNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInstrNotImplemented, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "VERR_IEM_INSTR_NOT_IMPLEMENTED",   "/IEM/CPU%u/cRetInstrNotImplemented", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetInfStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Informational statuses returned",  "/IEM/CPU%u/cRetInfStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cRetErrStatuses, STAMTYPE_U32_RESET, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Error statuses returned",          "/IEM/CPU%u/cRetErrStatuses", idCpu);
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cbWritten, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,
                        "Approx bytes written",             "/IEM/CPU%u/cbWritten", idCpu);
        /* Fixed: this is a "number of times" counter, so the unit must be
           STAMUNIT_COUNT, not STAMUNIT_BYTES (copy-paste from cbWritten). */
        STAMR3RegisterF(pVM, &pVCpu->iem.s.cPendingCommit, STAMTYPE_U32, STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,
                        "Times RC/R0 had to postpone instruction committing to ring-3", "/IEM/CPU%u/cPendingCommit", idCpu);

        /*
         * Host and guest CPU information.
         * Only queried once (CPU 0); the other VCPUs copy the result.
         */
        if (idCpu == 0)
        {
            pVCpu->iem.s.enmCpuVendor     = CPUMGetGuestCpuVendor(pVM);
            pVCpu->iem.s.enmHostCpuVendor = CPUMGetHostCpuVendor(pVM);
        }
        else
        {
            pVCpu->iem.s.enmCpuVendor     = pVM->aCpus[0].iem.s.enmCpuVendor;
            pVCpu->iem.s.enmHostCpuVendor = pVM->aCpus[0].iem.s.enmHostCpuVendor;
        }

        /*
         * Mark all buffers free.
         */
        uint32_t iMemMap = RT_ELEMENTS(pVCpu->iem.s.aMemMappings);
        while (iMemMap-- > 0)
            pVCpu->iem.s.aMemMappings[iMemMap].fAccess = IEM_ACCESS_INVALID;
    }
    return VINF_SUCCESS;
}
/**
 * Allocates zero-initialized content for an ASN.1 core structure.
 *
 * @returns IPRT status code.
 * @param   pAsn1Core   The core structure to attach the content to; must not
 *                      already own allocated content.
 * @param   cb          Number of content bytes (0 < cb < 1GB).
 * @param   pAllocator  The allocator vtable to use.  Mandatory.
 */
RTDECL(int) RTAsn1ContentAllocZ(PRTASN1CORE pAsn1Core, size_t cb, PCRTASN1ALLOCATORVTABLE pAllocator)
{
    AssertReturn(pAllocator != NULL, VERR_WRONG_ORDER);
    AssertReturn(cb > 0 && cb < _1G, VERR_INVALID_PARAMETER);
    AssertPtr(pAsn1Core);
    AssertReturn(!(pAsn1Core->fFlags & RTASN1CORE_F_ALLOCATED_CONTENT), VERR_INVALID_STATE);

    /* Set up a temporary allocation tracker for the allocator callback. */
    RTASN1ALLOCATION Tracker;
    Tracker.cbAllocated = 0;
    Tracker.cReallocs   = 0;
    Tracker.uReserved0  = 0;
    Tracker.pAllocator  = pAllocator;

    /* Allocate header + content in one block. */
    uint32_t const    cbNeeded = RT_OFFSETOF(RTASN1MEMCONTENT, au64Content) + (uint32_t)cb;
    PRTASN1MEMCONTENT pContent;
    int rc = pAllocator->pfnAlloc(pAllocator, &Tracker, (void **)&pContent, cbNeeded);
    if (RT_FAILURE(rc))
        return rc;

    Assert(Tracker.cbAllocated >= cbNeeded);
    pContent->Allocation = Tracker;

    /* Hand the content portion over to the core and mark it as owned. */
    pAsn1Core->cb       = (uint32_t)cb;
    pAsn1Core->uData.pv = &pContent->au64Content[0];
    pAsn1Core->fFlags  |= RTASN1CORE_F_ALLOCATED_CONTENT;
    return rc;
}
/**
 * Creates a new host-only network adapter in the first free slot.
 *
 * @returns VBox status code: the result of vboxNetAdpOsCreate(), or
 *          VERR_OUT_OF_RESOURCES when all adapter slots are occupied.
 * @param   pIfFactory  The trunk factory (embedded in the globals structure).
 * @param   ppNew       Where to store the new adapter on success; not touched
 *                      on failure.
 */
int vboxNetAdpCreate(PINTNETTRUNKFACTORY pIfFactory, PVBOXNETADP *ppNew)
{
    int rc;
    unsigned i;
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pIfFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, TrunkFactory));

    for (i = 0; i < RT_ELEMENTS(pGlobals->aAdapters); i++)
    {
        RTSPINLOCKTMP Tmp = RTSPINLOCKTMP_INITIALIZER;
        PVBOXNETADP pThis = &pGlobals->aAdapters[i];

        /* Atomically claim the slot: Invalid -> Transitional keeps others out. */
        if (vboxNetAdpCheckAndSetState(pThis, kVBoxNetAdpState_Invalid, kVBoxNetAdpState_Transitional))
        {
            /* Found an empty slot -- use it. */
            uint32_t cRefs = ASMAtomicIncU32(&pThis->cRefs);
            Assert(cRefs == 1); NOREF(cRefs);
            RTMAC Mac;
            vboxNetAdpComposeMACAddress(pThis, &Mac);
            rc = vboxNetAdpOsCreate(pThis, &Mac);
            if (RT_SUCCESS(rc))
            {
                *ppNew = pThis;
                RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
                vboxNetAdpSetState(pThis, kVBoxNetAdpState_Available);
                RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
            }
            else
            {
                /* Fixed: previously the slot was marked Available and the
                   reference leaked even when the OS-specific creation failed.
                   Roll back so the slot can be reused. */
                ASMAtomicDecU32(&pThis->cRefs);
                RTSpinlockAcquireNoInts(pThis->hSpinlock, &Tmp);
                vboxNetAdpSetState(pThis, kVBoxNetAdpState_Invalid);
                RTSpinlockReleaseNoInts(pThis->hSpinlock, &Tmp);
            }
            return rc;
        }
    }

    /* All slots in adapter array are busy. */
    return VERR_OUT_OF_RESOURCES;
}
/**
 * Fetches a USB string descriptor from the device and converts it from
 * UTF-16 to a UTF-8 string.
 *
 * @returns NT status code.  STATUS_INVALID_PARAMETER when the returned
 *          descriptor is too short, STATUS_UNSUCCESSFUL when the UTF-16 to
 *          UTF-8 conversion fails.
 * @param   pDevObj     Target device object.
 * @param   pszResult   Output buffer for the UTF-8 string; set to "" up front.
 * @param   cbResult    Size of the output buffer in bytes.
 * @param   iIndex      String descriptor index.
 * @param   LangId      Language ID to request.
 * @param   dwTimeoutMs Request timeout in milliseconds.
 */
VBOXUSBTOOL_DECL(NTSTATUS) VBoxUsbToolGetStringDescriptor(PDEVICE_OBJECT pDevObj, char *pszResult, ULONG cbResult, int iIndex, int LangId, ULONG dwTimeoutMs)
{
    /* Stack buffer large enough for the biggest legal string descriptor. */
    char aBuf[MAXIMUM_USB_STRING_LENGTH];
    AssertCompile(sizeof (aBuf) <= UINT8_MAX); /* bLength below is a UCHAR. */
    UCHAR cbBuf = (UCHAR)sizeof (aBuf);
    PUSB_STRING_DESCRIPTOR pDr = (PUSB_STRING_DESCRIPTOR)&aBuf;

    Assert(pszResult);
    *pszResult = 0; /* Ensure a terminated (empty) result on every failure path. */

    memset(pDr, 0, cbBuf);
    pDr->bLength = cbBuf;
    pDr->bDescriptorType = USB_STRING_DESCRIPTOR_TYPE;

    NTSTATUS Status = VBoxUsbToolGetDescriptor(pDevObj, pDr, cbBuf, USB_STRING_DESCRIPTOR_TYPE, iIndex, LangId, dwTimeoutMs);
    if (NT_SUCCESS(Status))
    {
        if (pDr->bLength >= sizeof (USB_STRING_DESCRIPTOR))
        {
            /* Convert the UTF-16 payload (bLength minus the 2-byte header).
               NOTE(review): bLength originates from the device; this assumes
               the lower layer clamps it to cbBuf -- confirm. */
            int rc = RTUtf16ToUtf8Ex(pDr->bString, (pDr->bLength - RT_OFFSETOF(USB_STRING_DESCRIPTOR, bString)) / sizeof(RTUTF16), &pszResult, cbResult, NULL /*pcch*/);
            if (RT_SUCCESS(rc))
            {
                USBLibPurgeEncoding(pszResult);
                Status = STATUS_SUCCESS;
            }
            else
                Status = STATUS_UNSUCCESSFUL;
        }
        else
            Status = STATUS_INVALID_PARAMETER;
    }
    return Status;
}
/**
 * Loads the block descriptor of the given block group from the medium.
 *
 * The group descriptor table is a packed array of BlockGroupDesc entries in
 * the block following the superblock; entry iBlkGrp describes group iBlkGrp.
 *
 * @returns IPRT status code.
 * @param   pThis    EXT filesystem instance data.
 * @param   iBlkGrp  Block group number to load.
 */
static int rtFsExtLoadBlkGrpDesc(PRTFILESYSTEMEXT pThis, uint32_t iBlkGrp)
{
    int rc = VINF_SUCCESS;
    PRTFILESYSTEMEXTBLKGRP pBlkGrpDesc = pThis->pBlkGrpDesc;
    /* Fixed: the old code always read the first table entry regardless of
       iBlkGrp; index into the descriptor table. */
    uint64_t offRead = (pThis->iSbBlock + 1) * pThis->cbBlock
                     + (uint64_t)iBlkGrp * sizeof(BlockGroupDesc);
    BlockGroupDesc BlkDesc;

    /* One bit per block, rounded up to whole bytes. */
    size_t cbBlockBitmap = pThis->cBlocksPerGroup / 8;
    if (pThis->cBlocksPerGroup % 8)
        cbBlockBitmap++;

    if (!pBlkGrpDesc)
    {
        /* Lazily allocate the descriptor buffer (header + trailing bitmap). */
        size_t cbBlkDesc = RT_OFFSETOF(RTFILESYSTEMEXTBLKGRP, abBlockBitmap[cbBlockBitmap]);
        pBlkGrpDesc = (PRTFILESYSTEMEXTBLKGRP)RTMemAllocZ(cbBlkDesc);
        if (!pBlkGrpDesc)
            return VERR_NO_MEMORY;
    }

    rc = RTVfsFileReadAt(pThis->hVfsFile, offRead, &BlkDesc, sizeof(BlkDesc), NULL);
    if (RT_SUCCESS(rc))
    {
        pBlkGrpDesc->offStart = pThis->iSbBlock + (uint64_t)iBlkGrp * pThis->cBlocksPerGroup * pThis->cbBlock;
        pBlkGrpDesc->offLast  = pBlkGrpDesc->offStart + pThis->cBlocksPerGroup * pThis->cbBlock;
        rc = RTVfsFileReadAt(pThis->hVfsFile, BlkDesc.offBlockBitmap * pThis->cbBlock,
                             &pBlkGrpDesc->abBlockBitmap[0], cbBlockBitmap, NULL);
    }

    /* Keep the (possibly newly allocated) buffer even on read failure so it
       is not leaked and can be reused on the next call. */
    pThis->pBlkGrpDesc = pBlkGrpDesc;
    return rc;
}
/**
 * @copydoc RAWPCIFACTORY::pfnCreateAndConnect
 */
static DECLCALLBACK(int) vboxPciFactoryCreateAndConnect(PRAWPCIFACTORY pFactory, uint32_t u32HostAddress, uint32_t fFlags, PRAWPCIPERVM pVmCtx, PRAWPCIDEVPORT *ppDevPort, uint32_t *pfDevFlags)
{
    /* Recover the globals from the embedded factory member. */
    PVBOXRAWPCIGLOBALS pGlobals = (PVBOXRAWPCIGLOBALS)((uint8_t *)pFactory - RT_OFFSETOF(VBOXRAWPCIGLOBALS, RawPciFactory));

    LogFlow(("vboxPciFactoryCreateAndConnect: PCI=%x fFlags=%#x\n", u32HostAddress, fFlags));
    Assert(pGlobals->cFactoryRefs > 0);

    int rc = vboxPciGlobalsLock(pGlobals);
    AssertRCReturn(rc, rc);

    /* A host device address may only be claimed by one instance at a time. */
    if (vboxPciFindInstanceLocked(pGlobals, u32HostAddress) != NULL)
        rc = VERR_RESOURCE_BUSY;
    else
        rc = vboxPciNewInstance(pGlobals, u32HostAddress, fFlags, pVmCtx, ppDevPort, pfDevFlags);

    vboxPciGlobalsUnlock(pGlobals);
    return rc;
}
/**
 * Locks down a range of user (ring-3) memory on OS/2 and records its page
 * list in a new memory object.
 *
 * @returns IPRT status code.
 * @param   ppMem       Where to store the new memory object on success.
 * @param   R3Ptr       User address of the range to lock.
 * @param   cb          Size of the range in bytes.
 * @param   fAccess     RTMEM_PROT_XXX; write access requests VMDHL_WRITE.
 * @param   R0Process   Must be the current process -- cross-process locking
 *                      is not supported on OS/2.
 */
DECLHIDDEN(int) rtR0MemObjNativeLockUser(PPRTR0MEMOBJINTERNAL ppMem, RTR3PTR R3Ptr, size_t cb, uint32_t fAccess, RTR0PROCESS R0Process)
{
    AssertMsgReturn(R0Process == RTR0ProcHandleSelf(), ("%p != %p\n", R0Process, RTR0ProcHandleSelf()), VERR_NOT_SUPPORTED);

    /* create the object.  The object is variable-sized: one page entry per
       page.  NOTE(review): assumes cb is page aligned (cb >> PAGE_SHIFT
       would otherwise drop the partial page) -- confirm callers guarantee
       this. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOCK, (void *)R3Ptr, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* lock it. */
    ULONG cPagesRet = cPages;
    int rc = KernVMLock(VMDHL_LONG | (fAccess & RTMEM_PROT_WRITE ? VMDHL_WRITE : 0),
                        (void *)R3Ptr, cb, &pMemOs2->Lock, &pMemOs2->aPages[0], &cPagesRet);
    if (!rc) /* OS/2 kernel APIs return 0 on success. */
    {
        /* Normalize the page list returned by the kernel. */
        rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
        Assert(cb == pMemOs2->Core.cb);
        Assert(R3Ptr == (RTR3PTR)pMemOs2->Core.pv);
        pMemOs2->Core.u.Lock.R0Process = R0Process;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
/**
 * Allocates contiguous physical memory on OS/2 below a given physical limit.
 *
 * @returns IPRT status code.
 * @param   ppMem        Where to store the new memory object on success.
 * @param   cb           Number of bytes to allocate.
 * @param   PhysHighest  Highest acceptable physical address; must be at
 *                       least 16MB (OS/2 can only restrict to 16MB via
 *                       VMDHA_16M).
 * @param   uAlignment   Must be PAGE_SIZE; other alignments unsupported.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocPhys(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, RTHCPHYS PhysHighest, size_t uAlignment)
{
    AssertMsgReturn(PhysHighest >= 16 *_1M, ("PhysHigest=%RHp\n", PhysHighest), VERR_NOT_SUPPORTED);

    /** @todo alignment */
    if (uAlignment != PAGE_SIZE)
        return VERR_NOT_SUPPORTED;

    /* create the object.  No page array needed for a contiguous physical
       allocation, so size only up to the Lock member. */
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, Lock), RTR0MEMOBJTYPE_PHYS, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation.  VMDHA_16M restricts to the first 16MB; it is only
       applied when the caller needs something below 4GB. */
    ULONG ulPhys = ~0UL;
    int rc = KernVMAlloc(cb, VMDHA_FIXED | VMDHA_CONTIG | (PhysHighest < _4G ? VMDHA_16M : 0),
                         &pMemOs2->Core.pv, (PPVOID)&ulPhys, NULL);
    if (!rc) /* OS/2 kernel APIs return 0 on success. */
    {
        Assert(ulPhys != ~0UL);
        pMemOs2->Core.u.Phys.fAllocated = true;
        pMemOs2->Core.u.Phys.PhysBase = ulPhys;
        *ppMem = &pMemOs2->Core;
        return VINF_SUCCESS;
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    return RTErrConvertFromOS2(rc);
}
/**
 * Allocates page-aligned low memory on OS/2 and records its page list.
 *
 * @returns IPRT status code; VERR_NO_LOW_MEMORY instead of VERR_NO_MEMORY
 *          when the allocation itself fails.
 * @param   ppMem        Where to store the new memory object on success.
 * @param   cb           Number of bytes to allocate.
 * @param   fExecutable  Ignored on OS/2.
 */
DECLHIDDEN(int) rtR0MemObjNativeAllocLow(PPRTR0MEMOBJINTERNAL ppMem, size_t cb, bool fExecutable)
{
    NOREF(fExecutable);

    /* create the object.  Variable-sized: one page entry per page.
       NOTE(review): assumes cb is page aligned -- confirm callers. */
    const ULONG cPages = cb >> PAGE_SHIFT;
    PRTR0MEMOBJOS2 pMemOs2 = (PRTR0MEMOBJOS2)rtR0MemObjNew(RT_OFFSETOF(RTR0MEMOBJOS2, aPages[cPages]), RTR0MEMOBJTYPE_LOW, NULL, cb);
    if (!pMemOs2)
        return VERR_NO_MEMORY;

    /* do the allocation. */
    int rc = KernVMAlloc(cb, VMDHA_FIXED, &pMemOs2->Core.pv, (PPVOID)-1, NULL);
    if (!rc) /* OS/2 kernel APIs return 0 on success. */
    {
        /* Resolve the linear range into physical pages. */
        ULONG cPagesRet = cPages;
        rc = KernLinToPageList(pMemOs2->Core.pv, cb, &pMemOs2->aPages[0], &cPagesRet);
        if (!rc)
        {
            rtR0MemObjFixPageList(&pMemOs2->aPages[0], cPages, cPagesRet);
            *ppMem = &pMemOs2->Core;
            return VINF_SUCCESS;
        }
        /* Page list lookup failed -- undo the allocation. */
        KernVMFree(pMemOs2->Core.pv);
    }
    rtR0MemObjDelete(&pMemOs2->Core);
    rc = RTErrConvertFromOS2(rc);
    /* Distinguish low-memory exhaustion for the caller. */
    return rc == VERR_NO_MEMORY ? VERR_NO_LOW_MEMORY : rc;
}
/**
 * Implements the SUPDRV component factory interface query method.
 *
 * @returns Pointer to an interface. NULL if not supported.
 *
 * @param   pSupDrvFactory      Pointer to the component factory registration structure.
 * @param   pSession            The session - unused.
 * @param   pszInterfaceUuid    The factory interface id.
 */
static DECLCALLBACK(void *) vboxNetAdpQueryFactoryInterface(PCSUPDRVFACTORY pSupDrvFactory, PSUPDRVSESSION pSession, const char *pszInterfaceUuid)
{
    /* Recover the globals from the embedded factory member. */
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pSupDrvFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, SupDrvFactory));

    /*
     * Parse the requested UUID; bail out on malformed input.
     */
    RTUUID Uuid;
    int rc = RTUuidFromStr(&Uuid, pszInterfaceUuid);
    if (RT_FAILURE(rc))
    {
        Log(("VBoxNetAdp: rc=%Rrc, uuid=%s\n", rc, pszInterfaceUuid));
        return NULL;
    }

    /*
     * Only the trunk factory interface is supported.
     */
    if (RTUuidCompareStr(&Uuid, INTNETTRUNKFACTORY_UUID_STR) != 0)
    {
#ifdef LOG_ENABLED
        Log(("VBoxNetAdp: unknown factory interface query (%s)\n", pszInterfaceUuid));
#endif
        return NULL;
    }

    ASMAtomicIncS32(&pGlobals->cFactoryRefs);
    return &pGlobals->TrunkFactory;
}
/**
 * Performs a GMMR0FreePages request.
 * This will call VMSetError on failure.
 *
 * @returns VBox status code.
 * @param   pVM             The cross context VM structure.
 * @param   pReq            Pointer to the request (returned by GMMR3FreePagesPrepare).
 * @param   cActualPages    The number of pages actually freed.
 */
GMMR3DECL(int) GMMR3FreePagesPerform(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cActualPages)
{
    /*
     * Shrink the request if fewer pages were queued than anticipated.
     */
    if (cActualPages != pReq->cPages)
    {
        AssertReturn(cActualPages < pReq->cPages, VERR_GMM_ACTUAL_PAGES_IPE);
        if (cActualPages == 0)
            return VINF_SUCCESS; /* Nothing to free at all. */
        pReq->cPages    = cActualPages;
        pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cActualPages]);
    }

    /*
     * Hand the request to ring-0; report failures via VMSetError.
     */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_FREE_PAGES, 0, &pReq->Hdr);
    if (RT_FAILURE(rc))
    {
        AssertRC(rc);
        return VMSetError(pVM, rc, RT_SRC_POS, N_("GMMR0FreePages failed to free %u pages"), pReq->cPages);
    }
    return rc;
}
/**
 * Submits up to four UHGSMI buffers via the display-interface escape and
 * waits for the first (command) buffer's synchronization event.
 *
 * @returns VINF_SUCCESS, VERR_INVALID_PARAMETER on bad input, or
 *          VERR_GENERAL_FAILURE when the escape or the wait fails.
 * @param   pHgsmi      The UHGSMI instance (private base wrapper).
 * @param   aBuffers    Buffers to submit; entry 0 must be a command buffer
 *                      carrying a synchronization handle.
 * @param   cBuffers    Number of entries in aBuffers (1..4).
 */
DECLCALLBACK(int) vboxUhgsmiBaseEscBufferSubmit(PVBOXUHGSMI pHgsmi, PVBOXUHGSMI_BUFFER_SUBMIT aBuffers, uint32_t cBuffers)
{
    /* we no chromium will not submit more than three buffers actually,
     * for simplicity allocate it statically on the stack
     * (SubmitInfo itself holds one entry, aBufInfos the other three). */
    struct
    {
        VBOXDISPIFESCAPE_UHGSMI_SUBMIT SubmitInfo;
        VBOXWDDM_UHGSMI_BUFFER_UI_INFO_ESCAPE aBufInfos[3];
    } Buf;

    if (!cBuffers || cBuffers > RT_ELEMENTS(Buf.aBufInfos) + 1)
    {
        WARN(("invalid cBuffers!"));
        return VERR_INVALID_PARAMETER;
    }

    /* The first buffer must be a command buffer with a sync event to wait on. */
    HANDLE hSynch = VBOXUHGSMIESCBASE_GET_BUFFER(aBuffers[0].pBuf)->hSynch;
    if (!hSynch)
    {
        WARN(("the fist buffer is not command!"));
        return VERR_INVALID_PARAMETER;
    }

    PVBOXUHGSMI_PRIVATE_BASE pPrivate = VBOXUHGSMIBASE_GET(pHgsmi);
    Buf.SubmitInfo.EscapeHdr.escapeCode = VBOXESC_UHGSMI_SUBMIT;
    Buf.SubmitInfo.EscapeHdr.u32CmdSpecific = cBuffers;

    /* Translate each submit entry into the escape's buffer descriptors.
       NOTE(review): unlike the KMT variant, Info.fSubFlags is never set here
       -- confirm whether the base escape path ignores it or this is an
       omission. */
    for (UINT i = 0; i < cBuffers; ++i)
    {
        VBOXWDDM_UHGSMI_BUFFER_UI_INFO_ESCAPE *pSubmInfo = &Buf.SubmitInfo.aBuffers[i];
        PVBOXUHGSMI_BUFFER_SUBMIT pBufInfo = &aBuffers[i];
        PVBOXUHGSMI_BUFFER_PRIVATE_ESC_BASE pBuf = VBOXUHGSMIESCBASE_GET_BUFFER(pBufInfo->pBuf);
        pSubmInfo->hAlloc = pBuf->Alloc.hAlloc;
        if (pBufInfo->fFlags.bEntireBuffer)
        {
            pSubmInfo->Info.offData = 0;
            pSubmInfo->Info.cbData = pBuf->BasePrivate.Base.cbBuffer;
        }
        else
        {
            pSubmInfo->Info.offData = pBufInfo->offData;
            pSubmInfo->Info.cbData = pBufInfo->cbData;
        }
    }

    int rc = vboxCrHgsmiPrivateEscape(pPrivate, &Buf.SubmitInfo, RT_OFFSETOF(VBOXDISPIFESCAPE_UHGSMI_SUBMIT, aBuffers[cBuffers]), FALSE);
    if (RT_SUCCESS(rc))
    {
        /* Block until the command buffer's event signals completion. */
        DWORD dwResult = WaitForSingleObject(hSynch, INFINITE);
        if (dwResult == WAIT_OBJECT_0)
            return VINF_SUCCESS;
        WARN(("wait failed, (0x%x)", dwResult));
        return VERR_GENERAL_FAILURE;
    }
    else
    {
        WARN(("vboxCrHgsmiPrivateEscape failed, rc (%d)", rc));
    }
    return VERR_GENERAL_FAILURE;
}
/**
 * Opens the native (POSIX) directory stream for an RTDIR instance.
 *
 * @returns IPRT status code.
 * @param   pDir        The directory instance; pDir->pszPath is the path to
 *                      open and pDir->pDir receives the DIR stream.
 * @param   pszPathBuf  Unused here; only used on Windows.
 */
int rtDirNativeOpen(PRTDIR pDir, char *pszPathBuf)
{
    NOREF(pszPathBuf); /* only used on windows */

    /*
     * Convert to a native path and try opendir.
     */
    char const *pszNativePath;
    int rc = rtPathToNative(&pszNativePath, pDir->pszPath, NULL);
    if (RT_SUCCESS(rc))
    {
        pDir->pDir = opendir(pszNativePath);
        if (pDir->pDir)
        {
            /*
             * Init data.
             */
            pDir->fDataUnread = false;
            /* Zero the dirent header up to d_name.  Fixed: the old length
               RT_OFFSETOF(RTDIR, Data.d_name) started at &pDir->Data but was
               measured from the start of RTDIR, overshooting into d_name by
               offsetof(RTDIR, Data) bytes -- potentially past the end of a
               small allocation. */
            memset(&pDir->Data, 0, RT_OFFSETOF(RTDIR, Data.d_name) - RT_OFFSETOF(RTDIR, Data)); /* not strictly necessary */
            memset(&pDir->Data.d_name[0], 0, pDir->cbMaxName);
        }
        else
            rc = RTErrConvertFromErrno(errno);
        rtPathFreeNative(pszNativePath, pDir->pszPath);
    }

    return rc;
}
/**
 * @copydoc INTNETTRUNKFACTORY::pfnCreateAndConnect
 */
static DECLCALLBACK(int) vboxNetAdpFactoryCreateAndConnect(PINTNETTRUNKFACTORY pIfFactory, const char *pszName, PINTNETTRUNKSWPORT pSwitchPort, uint32_t fFlags, PINTNETTRUNKIFPORT *ppIfPort)
{
    /* Recover the globals from the embedded factory member. */
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pIfFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, TrunkFactory));
    PVBOXNETADP pThis;
    int rc;

    LogFlow(("vboxNetAdpFactoryCreateAndConnect: pszName=%p:{%s} fFlags=%#x\n", pszName, pszName, fFlags));
    Assert(pGlobals->cFactoryRefs > 0);
    AssertMsgReturn(!fFlags, ("%#x\n", fFlags), VERR_INVALID_PARAMETER); /* no flags defined for this call */

    /*
     * Find instance, check if busy, connect if not.
     */
    pThis = vboxNetAdpFind(pGlobals, pszName);
    if (pThis)
    {
        /* Atomically claim the adapter: Available -> Transitional keeps
           concurrent connect attempts out while we work. */
        if (vboxNetAdpCheckAndSetState(pThis, kVBoxNetAdpState_Available, kVBoxNetAdpState_Transitional))
        {
            vboxNetAdpRetain(pThis);
            rc = vboxNetAdpConnectIt(pThis, pSwitchPort, ppIfPort);
            /* Commit the final state: Connected on success, back to Available
               on failure (the reference taken above is kept either way --
               NOTE(review): confirm the disconnect path releases it). */
            vboxNetAdpSetStateWithLock(pThis, RT_SUCCESS(rc) ? kVBoxNetAdpState_Connected : kVBoxNetAdpState_Available);
        }
        else
            rc = VERR_INTNET_FLT_IF_BUSY;
    }
    else
        rc = VERR_INTNET_FLT_IF_NOT_FOUND;

    return rc;
}
/** * Open part with reader. * * @returns iprt status code. * @param pReader The loader reader instance which will provide the raw image bits. * @param fFlags Reserved, MBZ. * @param enmArch Architecture specifier. * @param phMod Where to store the handle. */ int rtldrOpenWithReader(PRTLDRREADER pReader, uint32_t fFlags, RTLDRARCH enmArch, PRTLDRMOD phMod) { /* * Read and verify the file signature. */ union { char ach[4]; uint16_t au16[2]; uint32_t u32; } uSign; int rc = pReader->pfnRead(pReader, &uSign, sizeof(uSign), 0); if (RT_FAILURE(rc)) return rc; #ifndef LDR_WITH_KLDR if ( uSign.au16[0] != IMAGE_DOS_SIGNATURE && uSign.u32 != IMAGE_NT_SIGNATURE && uSign.u32 != IMAGE_ELF_SIGNATURE && uSign.au16[0] != IMAGE_LX_SIGNATURE) { Log(("rtldrOpenWithReader: %s: unknown magic %#x / '%.4s\n", pReader->pfnLogName(pReader), uSign.u32, &uSign.ach[0])); return VERR_INVALID_EXE_SIGNATURE; } #endif uint32_t offHdr = 0; if (uSign.au16[0] == IMAGE_DOS_SIGNATURE) { rc = pReader->pfnRead(pReader, &offHdr, sizeof(offHdr), RT_OFFSETOF(IMAGE_DOS_HEADER, e_lfanew)); if (RT_FAILURE(rc)) return rc; if (offHdr <= sizeof(IMAGE_DOS_HEADER)) { Log(("rtldrOpenWithReader: %s: no new header / invalid offset %#RX32\n", pReader->pfnLogName(pReader), offHdr)); return VERR_INVALID_EXE_SIGNATURE; } rc = pReader->pfnRead(pReader, &uSign, sizeof(uSign), offHdr); if (RT_FAILURE(rc)) return rc; if ( uSign.u32 != IMAGE_NT_SIGNATURE && uSign.au16[0] != IMAGE_LX_SIGNATURE && uSign.au16[0] != IMAGE_LE_SIGNATURE && uSign.au16[0] != IMAGE_NE_SIGNATURE) { Log(("rtldrOpenWithReader: %s: unknown new magic %#x / '%.4s\n", pReader->pfnLogName(pReader), uSign.u32, &uSign.ach[0])); return VERR_INVALID_EXE_SIGNATURE; } } /* * Create image interpreter instance depending on the signature. */ if (uSign.u32 == IMAGE_NT_SIGNATURE) #ifdef LDR_WITH_PE rc = rtldrPEOpen(pReader, fFlags, enmArch, offHdr, phMod); #else rc = VERR_PE_EXE_NOT_SUPPORTED; #endif else if (uSign.u32 == IMAGE_ELF_SIGNATURE)
/**
 * Re-prepares a GMMR0FreePages request.
 *
 * Resets the request header size, account and page count so a request buffer
 * previously returned by GMMR3FreePagesPrepare() can be reused.  Cannot fail.
 * (Fixed doc: the old "@returns VINF_SUCCESS or VERR_NO_TMP_MEMORY" was
 * wrong -- this function returns void.)
 *
 * @param   pVM         The cross context VM structure (unused).
 * @param   pReq        A request buffer previously returned by
 *                      GMMR3FreePagesPrepare().
 * @param   cPages      The number of pages originally passed to
 *                      GMMR3FreePagesPrepare().
 * @param   enmAccount  The account to charge.
 */
GMMR3DECL(void) GMMR3FreePagesRePrep(PVM pVM, PGMMFREEPAGESREQ pReq, uint32_t cPages, GMMACCOUNT enmAccount)
{
    Assert(pReq->Hdr.u32Magic == SUPVMMR0REQHDR_MAGIC);
    /* Restore the header size to cover the full page array again. */
    pReq->Hdr.cbReq = RT_OFFSETOF(GMMFREEPAGESREQ, aPages[cPages]);
    pReq->enmAccount = enmAccount;
    pReq->cPages = cPages;
    NOREF(pVM);
}
/**
 * Enables or disables guest notifications for a virtio ring by updating the
 * VRINGUSED_F_NO_NOTIFY bit in the used ring's flags field in guest memory.
 *
 * @param   pState      The device state.
 * @param   pVRing      The ring whose used-flags to update.
 * @param   fEnabled    true to allow notifications, false to suppress them.
 */
void vringSetNotification(PVPCISTATE pState, PVRING pVRing, bool fEnabled)
{
    uint16_t uFlags;

    /* Read-modify-write the flags word living in guest physical memory. */
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uFlags),
                      &uFlags, sizeof(uFlags));

    if (!fEnabled)
        uFlags |= VRINGUSED_F_NO_NOTIFY;   /* suppress notifications */
    else
        uFlags &= ~VRINGUSED_F_NO_NOTIFY;  /* allow notifications */

    PDMDevHlpPhysWrite(pState->CTX_SUFF(pDevIns),
                       pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uFlags),
                       &uFlags, sizeof(uFlags));
}
/**
 * @copydoc RAWPCIFACTORY::pfnRelease
 */
static DECLCALLBACK(void) vboxPciFactoryRelease(PRAWPCIFACTORY pFactory)
{
    /* Recover the globals from the embedded factory member and drop one ref. */
    PVBOXRAWPCIGLOBALS pGlobals = (PVBOXRAWPCIGLOBALS)((uint8_t *)pFactory - RT_OFFSETOF(VBOXRAWPCIGLOBALS, RawPciFactory));

    int32_t cRefs = ASMAtomicDecS32(&pGlobals->cFactoryRefs);
    LogFlow(("vboxPciFactoryRelease: cRefs=%d (new)\n", cRefs));
    Assert(cRefs >= 0);
    NOREF(cRefs); /* only used by the assertion and logging */
}
/**
 * Reads the used ring's index field from guest physical memory.
 *
 * @returns The current used index of the ring.
 * @param   pState  The device state.
 * @param   pVRing  The ring to read from.
 */
uint16_t vringReadUsedIndex(PVPCISTATE pState, PVRING pVRing)
{
    uint16_t u16Idx;
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrUsed + RT_OFFSETOF(VRINGUSED, uIndex),
                      &u16Idx, sizeof(u16Idx));
    return u16Idx;
}
/**
 * Duplicates a line.
 *
 * (Fixed doc: the old "@returns VBox status code" was wrong -- this returns
 * a pointer.)
 *
 * @returns Pointer to the heap-allocated duplicate, or NULL on allocation
 *          failure.  The caller owns the MM heap allocation.
 * @param   pVM     The VM handle.
 * @param   pLine   The line to duplicate.
 */
static PDBGFLINE dbgfR3LineDup(PVM pVM, PCDBGFLINE pLine)
{
    /* Size = header up to szFilename plus the string including its
       terminator (szFilename[1] accounts for the '\0'). */
    size_t cb = strlen(pLine->szFilename) + RT_OFFSETOF(DBGFLINE, szFilename[1]);
    PDBGFLINE pDup = (PDBGFLINE)MMR3HeapAlloc(pVM, MM_TAG_DBGF_LINE_DUP, cb);
    if (pDup)
        memcpy(pDup, pLine, cb);
    return pDup;
}
/**
 * @copydoc INTNETTRUNKFACTORY::pfnRelease
 */
static DECLCALLBACK(void) vboxNetAdpFactoryRelease(PINTNETTRUNKFACTORY pIfFactory)
{
    /* Recover the globals from the embedded factory member and drop one ref. */
    PVBOXNETADPGLOBALS pGlobals = (PVBOXNETADPGLOBALS)((uint8_t *)pIfFactory - RT_OFFSETOF(VBOXNETADPGLOBALS, TrunkFactory));

    int32_t cRefs = ASMAtomicDecS32(&pGlobals->cFactoryRefs);
    LogFlow(("vboxNetAdpFactoryRelease: cRefs=%d (new)\n", cRefs));
    Assert(cRefs >= 0);
    NOREF(cRefs); /* only used by the assertion and logging */
}
DECLCALLBACK(int) vboxUhgsmiKmtEscBufferSubmitAsynch(PVBOXUHGSMI pHgsmi, PVBOXUHGSMI_BUFFER_SUBMIT aBuffers, uint32_t cBuffers) { /* we no chromium will not submit more than three buffers actually, * for simplicity allocate it statically on the stack */ struct { VBOXDISPIFESCAPE_UHGSMI_SUBMIT SubmitInfo; VBOXWDDM_UHGSMI_BUFFER_UI_INFO_ESCAPE aBufInfos[3]; } Buf; if (cBuffers > RT_ELEMENTS(Buf.aBufInfos) + 1) { Assert(0); return VERR_INVALID_PARAMETER; } PVBOXUHGSMI_PRIVATE_KMT pPrivate = VBOXUHGSMIKMT_GET(pHgsmi); D3DKMT_ESCAPE DdiEscape = {0}; DdiEscape.hAdapter = pPrivate->Adapter.hAdapter; DdiEscape.hDevice = pPrivate->Device.hDevice; DdiEscape.Type = D3DKMT_ESCAPE_DRIVERPRIVATE; //Buf.DdiEscape.Flags.HardwareAccess = 1; DdiEscape.pPrivateDriverData = &Buf.SubmitInfo; DdiEscape.PrivateDriverDataSize = RT_OFFSETOF(VBOXDISPIFESCAPE_UHGSMI_SUBMIT, aBuffers[cBuffers]); DdiEscape.hContext = pPrivate->Context.hContext; Buf.SubmitInfo.EscapeHdr.escapeCode = VBOXESC_UHGSMI_SUBMIT; Buf.SubmitInfo.EscapeHdr.u32CmdSpecific = cBuffers; for (UINT i = 0; i < cBuffers; ++i) { VBOXWDDM_UHGSMI_BUFFER_UI_INFO_ESCAPE *pSubmInfo = &Buf.SubmitInfo.aBuffers[i]; PVBOXUHGSMI_BUFFER_SUBMIT pBufInfo = &aBuffers[i]; PVBOXUHGSMI_BUFFER_PRIVATE_KMT_ESC pBuf = VBOXUHGSMIKMTESC_GET_BUFFER(pBufInfo->pBuf); pSubmInfo->hAlloc = pBuf->Alloc.hAlloc; pSubmInfo->Info.fSubFlags = pBufInfo->fFlags; if (pBufInfo->fFlags.bEntireBuffer) { pSubmInfo->Info.offData = 0; pSubmInfo->Info.cbData = pBuf->Base.cbBuffer; } else { pSubmInfo->Info.offData = pBufInfo->offData; pSubmInfo->Info.cbData = pBufInfo->cbData; } } HRESULT hr = pPrivate->Callbacks.pfnD3DKMTEscape(&DdiEscape); Assert(hr == S_OK); if (hr == S_OK) { return VINF_SUCCESS; } return VERR_GENERAL_FAILURE; }
/**
 * Registers a new shared module for the VM
 *
 * @returns IPRT status code.  VERR_BUFFER_OVERFLOW when the module name or
 *          version does not fit the fixed-size request fields.
 * @param   pszModuleName   Module name
 * @param   pszVersion      Module version
 * @param   GCBaseAddr      Module base address
 * @param   cbModule        Module size
 * @param   cRegions        Number of shared region descriptors
 * @param   pRegions        Shared region(s)
 */
VBGLR3DECL(int) VbglR3RegisterSharedModule(char *pszModuleName, char *pszVersion, RTGCPTR64 GCBaseAddr, uint32_t cbModule, unsigned cRegions, VMMDEVSHAREDREGIONDESC *pRegions)
{
    VMMDevSharedModuleRegistrationRequest *pReq;
    int rc;

    /* Sanity check. */
    AssertReturn(cRegions < VMMDEVSHAREDREGIONDESC_MAX, VERR_INVALID_PARAMETER);

    /* Variable-sized request: header plus cRegions region descriptors. */
    pReq = (VMMDevSharedModuleRegistrationRequest *)RTMemAllocZ(RT_OFFSETOF(VMMDevSharedModuleRegistrationRequest, aRegions[cRegions]));
    AssertReturn(pReq, VERR_NO_MEMORY);

    vmmdevInitRequest(&pReq->header, VMMDevReq_RegisterSharedModule);
    pReq->header.size = RT_OFFSETOF(VMMDevSharedModuleRegistrationRequest, aRegions[cRegions]);
    pReq->GCBaseAddr  = GCBaseAddr;
    pReq->cbModule    = cbModule;
    pReq->cRegions    = cRegions;
    /* Tell the host which OS family we are, so it can match modules across
       guests of the same family. */
#ifdef RT_OS_WINDOWS
# if ARCH_BITS == 32
    pReq->enmGuestOS = VBOXOSFAMILY_Windows32;
# else
    pReq->enmGuestOS = VBOXOSFAMILY_Windows64;
# endif
#else
    /** @todo Report the proper OS family for non-Windows guests. */
    pReq->enmGuestOS = VBOXOSFAMILY_Unknown;
#endif
    for (unsigned i = 0; i < cRegions; i++)
        pReq->aRegions[i] = pRegions[i];

    /* Name and version must fit the fixed-size request fields. */
    if (   RTStrCopy(pReq->szName, sizeof(pReq->szName), pszModuleName) != VINF_SUCCESS
        || RTStrCopy(pReq->szVersion, sizeof(pReq->szVersion), pszVersion) != VINF_SUCCESS)
    {
        rc = VERR_BUFFER_OVERFLOW;
        goto end;
    }

    rc = vbglR3GRPerform(&pReq->header);

end:
    RTMemFree(pReq);
    return rc;
}
/**
 * Reads the available ring's flags field from guest physical memory.
 *
 * @returns The current flags of the available ring.
 * @param   pState  The device state.
 * @param   pVRing  The ring to read from.
 */
uint16_t vringReadAvailFlags(PVPCISTATE pState, PVRING pVRing)
{
    uint16_t u16Flags;
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrAvail + RT_OFFSETOF(VRINGAVAIL, uFlags),
                      &u16Flags, sizeof(u16Flags));
    return u16Flags;
}
/**
 * Fills a paging-transfer command's page-number array from an MDL's PFN list.
 *
 * @returns Number of bytes of pCmd actually used (header + written entries).
 * @param   pCmd            The paging transfer command to fill.
 * @param   pMdl            MDL providing the physical page numbers.
 * @param   iPfn            Index of the first PFN to copy from the MDL.
 * @param   cPages          Number of pages requested.
 * @param   cbBuffer        Size of pCmd's buffer in bytes; limits how many
 *                          entries fit.
 * @param   pcPagesWritten  Where to return the number of entries written
 *                          (may be less than cPages if the buffer is small).
 */
uint32_t VBoxCVDdiPTransferVRamSysBuildEls(VBOXCMDVBVA_PAGING_TRANSFER *pCmd, PMDL pMdl, uint32_t iPfn, uint32_t cPages, uint32_t cbBuffer, uint32_t *pcPagesWritten)
{
    uint32_t cbInitBuffer = cbBuffer;
    uint32_t i = 0;
    VBOXCMDVBVAPAGEIDX *pPageNumbers = pCmd->Data.aPageNumbers;

    /* Reserve the fixed header; what remains holds page-index entries.
       NOTE(review): assumes cbBuffer >= RT_OFFSETOF(..., Data.aPageNumbers);
       otherwise this subtraction underflows -- confirm callers guarantee a
       minimum buffer size. */
    cbBuffer -= RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers);

    /* Copy PFNs until either cPages are done or the buffer is full. */
    for (; i < cPages && cbBuffer >= sizeof (*pPageNumbers); ++i, cbBuffer -= sizeof (*pPageNumbers))
    {
        pPageNumbers[i] = (VBOXCMDVBVAPAGEIDX)(MmGetMdlPfnArray(pMdl)[iPfn + i]);
    }

    *pcPagesWritten = i;
    Assert(cbInitBuffer - cbBuffer == RT_OFFSETOF(VBOXCMDVBVA_PAGING_TRANSFER, Data.aPageNumbers[i]));
    Assert(cbInitBuffer - cbBuffer >= sizeof (VBOXCMDVBVA_PAGING_TRANSFER));
    return cbInitBuffer - cbBuffer;
}
/**
 * Reports a state change of a specific guest user.
 *
 * @returns IPRT status value
 * @param   pszUser     Guest user name to report state for.
 * @param   pszDomain   Domain the guest user's account is bound to.  Optional.
 * @param   enmState    Guest user state to report.
 * @param   puDetails   Pointer to state details.  Optional.
 * @param   cbDetails   Size (in bytes) of state details.  Pass 0 if puDetails
 *                      is NULL.
 */
VBGLR3DECL(int) VbglR3GuestUserReportState(const char *pszUser, const char *pszDomain, VBoxGuestUserState enmState, uint8_t *puDetails, uint32_t cbDetails)
{
    AssertPtrReturn(pszUser, VERR_INVALID_POINTER);
    /* pszDomain is optional. */
    /* puDetails is optional. */
    AssertReturn(cbDetails == 0 || puDetails != NULL, VERR_INVALID_PARAMETER);
    AssertReturn(cbDetails < 16U*_1M, VERR_OUT_OF_RANGE);

    uint32_t cbBase   = sizeof(VMMDevReportGuestUserState);
    uint32_t cbUser   = (uint32_t)strlen(pszUser) + 1; /* Include terminating zero */
    /* Fixed: explicit cast like cbUser above -- strlen() returns size_t and
       was narrowed implicitly. */
    uint32_t cbDomain = pszDomain ? (uint32_t)strlen(pszDomain) + 1 /* Ditto */ : 0;

    /* Allocate enough space for all fields. */
    uint32_t cbSize = cbBase + cbUser + cbDomain + cbDetails;
    VMMDevReportGuestUserState *pReport = (VMMDevReportGuestUserState *)RTMemAllocZ(cbSize);
    if (!pReport)
        return VERR_NO_MEMORY;

    int rc = vmmdevInitRequest(&pReport->header, VMMDevReq_ReportGuestUserState);
    if (RT_SUCCESS(rc))
    {
        pReport->header.size      = cbSize;
        pReport->status.state     = enmState;
        pReport->status.cbUser    = cbUser;
        pReport->status.cbDomain  = cbDomain;
        pReport->status.cbDetails = cbDetails;

        /*
         * Note: cbOffDynamic contains the first dynamic array entry within
         *       VBoxGuestUserStatus.
         *       Therefore it's vital to *not* change the order of the struct members
         *       without altering this code.  Don't try this at home.
         */
        uint32_t cbOffDynamic = RT_OFFSETOF(VBoxGuestUserStatus, szUser);

        /* pDynamic marks the beginning for the dynamically allocated areas:
           user name, then domain, then the opaque details blob. */
        uint8_t *pDynamic = (uint8_t *)&pReport->status;
        pDynamic += cbOffDynamic;
        AssertPtr(pDynamic);

        memcpy(pDynamic, pszUser, cbUser);
        if (cbDomain)
            memcpy(pDynamic + cbUser, pszDomain, cbDomain);
        if (cbDetails)
            memcpy(pDynamic + cbUser + cbDomain, puDetails, cbDetails);

        rc = vbglR3GRPerform(&pReport->header);
    }

    RTMemFree(pReport);
    return rc;
}
/**
 * Reads one entry of the available ring from guest physical memory.
 *
 * @returns The descriptor index stored at the given ring position.
 * @param   pState  The device state.
 * @param   pVRing  The ring to read from.
 * @param   uIndex  Logical ring position; wrapped modulo the ring size.
 */
uint16_t vringReadAvail(PVPCISTATE pState, PVRING pVRing, uint32_t uIndex)
{
    uint16_t u16Entry;
    PDMDevHlpPhysRead(pState->CTX_SUFF(pDevIns),
                      pVRing->addrAvail + RT_OFFSETOF(VRINGAVAIL, auRing[uIndex % pVRing->uSize]),
                      &u16Entry, sizeof(u16Entry));
    return u16Entry;
}
/**
 * @see GMMR0RegisterSharedModule
 */
GMMR3DECL(int) GMMR3RegisterSharedModule(PVM pVM, PGMMREGISTERSHAREDMODULEREQ pReq)
{
    /* Finalize the variable-sized request header before passing it to ring-0. */
    pReq->Hdr.u32Magic = SUPVMMR0REQHDR_MAGIC;
    pReq->Hdr.cbReq    = RT_OFFSETOF(GMMREGISTERSHAREDMODULEREQ, aRegions[pReq->cRegions]);

    /* On VINF_SUCCESS the real status is carried inside the request. */
    int rc = VMMR3CallR0(pVM, VMMR0_DO_GMM_REGISTER_SHARED_MODULE, 0, &pReq->Hdr);
    return rc == VINF_SUCCESS ? pReq->rc : rc;
}