/**
 * Ensures that there is space for at least @a cNewRanges in the table,
 * reallocating the table if necessary.
 *
 * @returns Pointer to the MSR ranges on success, NULL on failure.  On failure
 *          @a *ppaMsrRanges is freed and set to NULL.
 * @param   pVM             The cross context VM structure.  If NULL,
 *                          use the process heap, otherwise the VM's hyper heap.
 * @param   ppaMsrRanges    The variable pointing to the ranges (input/output).
 * @param   cMsrRanges      The current number of ranges.
 * @param   cNewRanges      The number of ranges to be added.
 */
static PCPUMMSRRANGE cpumR3MsrRangesEnsureSpace(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t cMsrRanges, uint32_t cNewRanges)
{
    uint32_t cMsrRangesAllocated;
    if (!pVM)
        cMsrRangesAllocated = RT_ALIGN_32(cMsrRanges, 16);
    else
    {
        /*
         * We're using the hyper heap now, but when the range array was copied over to it from
         * the host-context heap, we only copied the exact size, not the ensured size.
         * See @bugref{7270}.
         */
        cMsrRangesAllocated = cMsrRanges;
    }

    if (cMsrRangesAllocated < cMsrRanges + cNewRanges)
    {
        void    *pvNew;
        uint32_t cNew = RT_ALIGN_32(cMsrRanges + cNewRanges, 16);
        if (pVM)
        {
            Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
            Assert(cMsrRanges   == pVM->cpum.s.GuestInfo.cMsrRanges);

            size_t cb    = cMsrRangesAllocated * sizeof(**ppaMsrRanges);
            size_t cbNew = cNew * sizeof(**ppaMsrRanges);
            int rc = MMR3HyperRealloc(pVM, *ppaMsrRanges, cb, 32, MM_TAG_CPUM_MSRS, cbNew, &pvNew);
            if (RT_FAILURE(rc))
            {
                *ppaMsrRanges = NULL;
                pVM->cpum.s.GuestInfo.paMsrRangesR0 = NIL_RTR0PTR;
                pVM->cpum.s.GuestInfo.paMsrRangesRC = NIL_RTRCPTR;
                LogRel(("CPUM: cpumR3MsrRangesEnsureSpace: MMR3HyperRealloc failed. rc=%Rrc\n", rc));
                return NULL;
            }
        }
        else
        {
            pvNew = RTMemRealloc(*ppaMsrRanges, cNew * sizeof(**ppaMsrRanges));
            if (!pvNew)
            {
                RTMemFree(*ppaMsrRanges);
                *ppaMsrRanges = NULL;
                return NULL;
            }
        }
        *ppaMsrRanges = (PCPUMMSRRANGE)pvNew;
    }

    if (pVM)
    {
        /* Update the R0 and RC pointers. */
        Assert(ppaMsrRanges == &pVM->cpum.s.GuestInfo.paMsrRangesR3);
        pVM->cpum.s.GuestInfo.paMsrRangesR0 = MMHyperR3ToR0(pVM, *ppaMsrRanges);
        pVM->cpum.s.GuestInfo.paMsrRangesRC = MMHyperR3ToRC(pVM, *ppaMsrRanges);
    }

    return *ppaMsrRanges;
}
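/*
 * A minimal caller sketch (hypothetical helper, not taken from the sources):
 * grow the table by one slot before appending a range.  The name
 * cpumR3MsrRangesAppendExample and the pcMsrRanges parameter are illustrative
 * assumptions; only cpumR3MsrRangesEnsureSpace above is real.
 */
static int cpumR3MsrRangesAppendExample(PVM pVM, PCPUMMSRRANGE *ppaMsrRanges, uint32_t *pcMsrRanges, PCCPUMMSRRANGE pNewRange)
{
    PCPUMMSRRANGE paMsrRanges = cpumR3MsrRangesEnsureSpace(pVM, ppaMsrRanges, *pcMsrRanges, 1 /*cNewRanges*/);
    if (!paMsrRanges)
        return VERR_NO_MEMORY;              /* the helper already freed and NULLed the array */
    paMsrRanges[*pcMsrRanges] = *pNewRange; /* fill the newly ensured slot */
    *pcMsrRanges += 1;
    return VINF_SUCCESS;
}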
/**
 * Relocates the raw-mode context pointers in the per-VCPU IEM state.
 *
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(void) IEMR3Relocate(PVM pVM)
{
    for (VMCPUID idCpu = 0; idCpu < pVM->cCpus; idCpu++)
    {
        pVM->aCpus[idCpu].iem.s.pCtxRC = VM_RC_ADDR(pVM, pVM->aCpus[idCpu].iem.s.pCtxR3);
        if (pVM->aCpus[idCpu].iem.s.pStatsRC)
            pVM->aCpus[idCpu].iem.s.pStatsRC = MMHyperR3ToRC(pVM, pVM->aCpus[idCpu].iem.s.pStatsCCR3);
    }
}
/**
 * Marks a page as scanned or not scanned.
 *
 * @note    We always mark the page as scanned, even if we haven't completely
 *          done so.
 *
 * @returns VBox status code.
 * @param   pVM         Pointer to the VM.
 * @param   pPage       GC page address (not necessarily aligned).
 * @param   fScanned    Mark as scanned or not scanned.
 */
VMM_INT_DECL(int) CSAMMarkPage(PVM pVM, RTRCUINTPTR pPage, bool fScanned)
{
    int       pgdir, bit;
    uintptr_t page;

#ifdef LOG_ENABLED
    if (fScanned && !CSAMIsPageScanned(pVM, (RTRCPTR)pPage))
        Log(("CSAMMarkPage %RRv\n", pPage));
#endif

    if (!CSAMIsEnabled(pVM))
        return VINF_SUCCESS;
    Assert(!HMIsEnabled(pVM));

    page  = (uintptr_t)pPage;
    pgdir = page >> X86_PAGE_4M_SHIFT;
    bit   = (page & X86_PAGE_4M_OFFSET_MASK) >> X86_PAGE_4K_SHIFT;

    Assert(pgdir < CSAM_PGDIRBMP_CHUNKS);
    Assert(bit < PAGE_SIZE);

    if (!CTXSUFF(pVM->csam.s.pPDBitmap)[pgdir])
    {
        STAM_COUNTER_INC(&pVM->csam.s.StatBitmapAlloc);
        int rc = MMHyperAlloc(pVM, CSAM_PAGE_BITMAP_SIZE, 0, MM_TAG_CSAM, (void **)&pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir]);
        if (RT_FAILURE(rc))
        {
            Log(("MMHyperAlloc failed with %Rrc\n", rc));
            return rc;
        }
#ifdef IN_RC
        pVM->csam.s.pPDHCBitmapGC[pgdir] = MMHyperRCToR3(pVM, (RCPTRTYPE(void *))pVM->csam.s.pPDBitmapGC[pgdir]);
        if (!pVM->csam.s.pPDHCBitmapGC[pgdir])
        {
            Log(("MMHyperRCToR3 failed for %RRv\n", pVM->csam.s.pPDBitmapGC[pgdir]));
            return VERR_INTERNAL_ERROR; /* rc would be VINF_SUCCESS here */
        }
#else
        pVM->csam.s.pPDGCBitmapHC[pgdir] = MMHyperR3ToRC(pVM, pVM->csam.s.pPDBitmapHC[pgdir]);
        if (!pVM->csam.s.pPDGCBitmapHC[pgdir])
        {
            Log(("MMHyperR3ToRC failed for %RHv\n", pVM->csam.s.pPDBitmapHC[pgdir]));
            return VERR_INTERNAL_ERROR; /* rc would be VINF_SUCCESS here */
        }
#endif
    }

    if (fScanned)
        ASMBitSet((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);
    else
        ASMBitClear((void *)pVM->csam.s.CTXSUFF(pPDBitmap)[pgdir], bit);

    return VINF_SUCCESS;
}
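/*
 * Standalone sketch of the index arithmetic above (illustrative only): each
 * bitmap chunk covers one 4MB page-directory slot, with one bit per 4KB page.
 * The shift/mask constants are the plain values behind X86_PAGE_4M_SHIFT,
 * X86_PAGE_4M_OFFSET_MASK and X86_PAGE_4K_SHIFT.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uintptr_t const page  = 0x00c01000;                 /* example guest address */
    unsigned  const pgdir = page >> 22;                 /* which 4MB chunk */
    unsigned  const bit   = (page & 0x003fffff) >> 12;  /* which 4KB page within it */
    printf("pgdir=%u bit=%u\n", pgdir, bit);            /* prints: pgdir=3 bit=1 */
    return 0;
}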
/**
 * Free an item.
 *
 * @param   pQueue  The queue.
 * @param   pItem   The item.
 */
DECLINLINE(void) pdmR3QueueFreeItem(PPDMQUEUE pQueue, PPDMQUEUEITEMCORE pItem)
{
    VM_ASSERT_EMT(pQueue->pVMR3);

    int i     = pQueue->iFreeHead;
    int iNext = (i + 1) % (pQueue->cItems + PDMQUEUE_FREE_SLACK);

    pQueue->aFreeItems[i].pItemR3 = pItem;
    if (pQueue->pVMRC)
    {
        pQueue->aFreeItems[i].pItemRC = MMHyperR3ToRC(pQueue->pVMR3, pItem);
        pQueue->aFreeItems[i].pItemR0 = MMHyperR3ToR0(pQueue->pVMR3, pItem);
    }

    if (!ASMAtomicCmpXchgU32(&pQueue->iFreeHead, iNext, i))
        AssertMsgFailed(("huh? i=%d iNext=%d iFreeHead=%d iFreeTail=%d\n", i, iNext, pQueue->iFreeHead, pQueue->iFreeTail));
    STAM_STATS({ ASMAtomicDecU32(&pQueue->cStatPending); });
}
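/*
 * Illustrative, standalone sketch of the free-list ring arithmetic used above
 * (not VBox code): the free array holds cItems plus some slack entries, so the
 * head index can advance past every in-flight item without colliding with the
 * tail.  FREE_SLACK here stands in for PDMQUEUE_FREE_SLACK.
 */
#include <stdio.h>

#define C_ITEMS     4
#define FREE_SLACK  1

int main(void)
{
    unsigned iFreeHead = C_ITEMS;   /* a fresh queue starts with all items free */
    unsigned iFreeTail = 0;
    /* Each free advances the head one step around the ring: */
    for (int c = 0; c < 3; c++)
    {
        unsigned iNext = (iFreeHead + 1) % (C_ITEMS + FREE_SLACK);
        printf("free: head %u -> %u (tail=%u)\n", iFreeHead, iNext, iFreeTail);
        iFreeHead = iNext;
    }
    return 0;
}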
/**
 * Relocate the queues.
 *
 * @param   pVM         Pointer to the VM.
 * @param   offDelta    The relocation delta.
 */
void pdmR3QueueRelocate(PVM pVM, RTGCINTPTR offDelta)
{
    /*
     * Process the queues.
     */
    PUVM      pUVM       = pVM->pUVM;
    PPDMQUEUE pQueueNext = pUVM->pdm.s.pQueuesTimer;
    PPDMQUEUE pQueue     = pUVM->pdm.s.pQueuesForced;
    do
    {
        while (pQueue)
        {
            if (pQueue->pVMRC)
            {
                pQueue->pVMRC = pVM->pVMRC;

                /* Pending RC items. */
                if (pQueue->pPendingRC)
                {
                    pQueue->pPendingRC += offDelta;
                    PPDMQUEUEITEMCORE pCur = (PPDMQUEUEITEMCORE)MMHyperRCToR3(pVM, pQueue->pPendingRC);
                    while (pCur->pNextRC)
                    {
                        pCur->pNextRC += offDelta;
                        pCur = (PPDMQUEUEITEMCORE)MMHyperRCToR3(pVM, pCur->pNextRC);
                    }
                }

                /* The free items. */
                uint32_t i = pQueue->iFreeTail;
                while (i != pQueue->iFreeHead)
                {
                    pQueue->aFreeItems[i].pItemRC = MMHyperR3ToRC(pVM, pQueue->aFreeItems[i].pItemR3);
                    i = (i + 1) % (pQueue->cItems + PDMQUEUE_FREE_SLACK);
                }
            }

            /* next queue */
            pQueue = pQueue->pNext;
        }

        /* next queue list */
        pQueue = pQueueNext;
        pQueueNext = NULL;
    } while (pQueue);
}
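/*
 * Tiny standalone sketch of the delta-based fixup the relocator applies to
 * every raw-mode pointer above (illustrative values only):
 */
#include <stdint.h>
#include <stdio.h>

typedef uint32_t RCPTR; /* stands in for an RTRCPTR-style raw-mode address */

int main(void)
{
    RCPTR   pPendingRC = 0xa0001000;    /* hypothetical pre-move RC address */
    int32_t offDelta   = 0x00100000;    /* how far the hypervisor area moved */
    pPendingRC += offDelta;             /* the same fixup pdmR3QueueRelocate applies */
    printf("relocated to %#x\n", pPendingRC);
    return 0;
}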
/**
 * Internal worker for the queue creation APIs.
 *
 * @returns VBox status code.
 * @param   pVM                 Pointer to the VM.
 * @param   cbItem              Item size.
 * @param   cItems              Number of items.
 * @param   cMilliesInterval    Number of milliseconds between polling the queue.
 *                              If 0, the emulation thread will be notified whenever an item arrives.
 * @param   fRZEnabled          Set if the queue will be used from RC/R0 and needs to be allocated from the hyper heap.
 * @param   pszName             The queue name. Unique. Not copied.
 * @param   ppQueue             Where to store the queue handle.
 */
static int pdmR3QueueCreate(PVM pVM, size_t cbItem, uint32_t cItems, uint32_t cMilliesInterval, bool fRZEnabled,
                            const char *pszName, PPDMQUEUE *ppQueue)
{
    PUVM pUVM = pVM->pUVM;

    /*
     * Validate input.
     */
    AssertMsgReturn(cbItem >= sizeof(PDMQUEUEITEMCORE) && cbItem < _1M, ("cbItem=%zu\n", cbItem), VERR_OUT_OF_RANGE);
    AssertMsgReturn(cItems >= 1 && cItems <= _64K, ("cItems=%u\n", cItems), VERR_OUT_OF_RANGE);

    /*
     * Align the item size and calculate the structure size.
     */
    cbItem = RT_ALIGN(cbItem, sizeof(RTUINTPTR));
    size_t cb = cbItem * cItems + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16);
    PPDMQUEUE pQueue;
    int rc;
    if (fRZEnabled)
        rc = MMHyperAlloc(pVM, cb, 0, MM_TAG_PDM_QUEUE, (void **)&pQueue);
    else
        rc = MMR3HeapAllocZEx(pVM, MM_TAG_PDM_QUEUE, cb, (void **)&pQueue);
    if (RT_FAILURE(rc))
        return rc;

    /*
     * Initialize the data fields.
     */
    pQueue->pVMR3 = pVM;
    pQueue->pVMR0 = fRZEnabled ? pVM->pVMR0 : NIL_RTR0PTR;
    pQueue->pVMRC = fRZEnabled ? pVM->pVMRC : NIL_RTRCPTR;
    pQueue->pszName = pszName;
    pQueue->cMilliesInterval = cMilliesInterval;
    //pQueue->pTimer = NULL;
    pQueue->cbItem = (uint32_t)cbItem;
    pQueue->cItems = cItems;
    //pQueue->pPendingR3 = NULL;
    //pQueue->pPendingR0 = NULL;
    //pQueue->pPendingRC = NULL;
    pQueue->iFreeHead = cItems;
    //pQueue->iFreeTail = 0;
    PPDMQUEUEITEMCORE pItem = (PPDMQUEUEITEMCORE)((char *)pQueue + RT_ALIGN_Z(RT_OFFSETOF(PDMQUEUE, aFreeItems[cItems + PDMQUEUE_FREE_SLACK]), 16));
    for (unsigned i = 0; i < cItems; i++, pItem = (PPDMQUEUEITEMCORE)((char *)pItem + cbItem))
    {
        pQueue->aFreeItems[i].pItemR3 = pItem;
        if (fRZEnabled)
        {
            pQueue->aFreeItems[i].pItemR0 = MMHyperR3ToR0(pVM, pItem);
            pQueue->aFreeItems[i].pItemRC = MMHyperR3ToRC(pVM, pItem);
        }
    }

    /*
     * Create timer?
     */
    if (cMilliesInterval)
    {
        rc = TMR3TimerCreateInternal(pVM, TMCLOCK_REAL, pdmR3QueueTimer, pQueue, "Queue timer", &pQueue->pTimer);
        if (RT_SUCCESS(rc))
        {
            rc = TMTimerSetMillies(pQueue->pTimer, cMilliesInterval);
            if (RT_FAILURE(rc))
            {
                AssertMsgFailed(("TMTimerSetMillies failed rc=%Rrc\n", rc));
                int rc2 = TMR3TimerDestroy(pQueue->pTimer);
                AssertRC(rc2);
            }
        }
        else
            AssertMsgFailed(("TMR3TimerCreateInternal failed rc=%Rrc\n", rc));
        if (RT_FAILURE(rc))
        {
            if (fRZEnabled)
                MMHyperFree(pVM, pQueue);
            else
                MMR3HeapFree(pQueue);
            return rc;
        }

        /*
         * Insert into the queue list for timer driven queues.
         */
        pdmLock(pVM);
        pQueue->pNext = pUVM->pdm.s.pQueuesTimer;
        pUVM->pdm.s.pQueuesTimer = pQueue;
        pdmUnlock(pVM);
    }
    else
    {
        /*
         * Insert into the queue list for forced action driven queues.
         * This is a FIFO, so insert at the end.
         */
        /** @todo we should add a priority to the queues so we don't have to rely on
         * the initialization order to deal with problems like @bugref{1605} (pgm/pcnet
         * deadlock caused by the critsect queue to be last in the chain).
         * - Update: the critical sections are no longer using queues, so this isn't a real
         *   problem any longer. The priority might be a nice feature for later though.
         */
        pdmLock(pVM);
        if (!pUVM->pdm.s.pQueuesForced)
            pUVM->pdm.s.pQueuesForced = pQueue;
        else
        {
            PPDMQUEUE pPrev = pUVM->pdm.s.pQueuesForced;
            while (pPrev->pNext)
                pPrev = pPrev->pNext;
            pPrev->pNext = pQueue;
        }
        pdmUnlock(pVM);
    }

    /*
     * Register the statistics.
     */
    STAMR3RegisterF(pVM, &pQueue->cbItem,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_BYTES,        "Item size.",                   "/PDM/Queue/%s/cbItem",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->cItems,               STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,        "Queue size.",                  "/PDM/Queue/%s/cItems",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatAllocFailures,    STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "PDMQueueAlloc failures.",      "/PDM/Queue/%s/AllocFailures",  pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatInsert,           STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Calls to PDMQueueInsert.",     "/PDM/Queue/%s/Insert",         pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlush,            STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Calls to pdmR3QueueFlush.",    "/PDM/Queue/%s/Flush",          pQueue->pszName);
    STAMR3RegisterF(pVM, &pQueue->StatFlushLeftovers,   STAMTYPE_COUNTER, STAMVISIBILITY_ALWAYS, STAMUNIT_OCCURENCES,   "Left over items after flush.", "/PDM/Queue/%s/FlushLeftovers", pQueue->pszName);
#ifdef VBOX_WITH_STATISTICS
    STAMR3RegisterF(pVM, &pQueue->StatFlushPrf,         STAMTYPE_PROFILE, STAMVISIBILITY_ALWAYS, STAMUNIT_CALLS,        "Profiling pdmR3QueueFlush.",   "/PDM/Queue/%s/FlushPrf",       pQueue->pszName);
    STAMR3RegisterF(pVM, (void *)&pQueue->cStatPending, STAMTYPE_U32,     STAMVISIBILITY_ALWAYS, STAMUNIT_COUNT,        "Pending items.",               "/PDM/Queue/%s/Pending",        pQueue->pszName);
#endif

    *ppQueue = pQueue;
    return VINF_SUCCESS;
}
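/*
 * Typical producer-side usage of a queue built by the worker above (a sketch;
 * the queue handle would come from one of the public PDMR3QueueCreate*
 * wrappers that end up in pdmR3QueueCreate).  The function name here is
 * illustrative; PDMQueueAlloc and PDMQueueInsert are the real APIs.
 */
static void pdmQueueProducerExample(PPDMQUEUE pQueue)
{
    PPDMQUEUEITEMCORE pItem = PDMQueueAlloc(pQueue);
    if (pItem)
        PDMQueueInsert(pQueue, pItem);  /* hands the item over for the consumer callback */
    /* else: no free item right now; the caller is expected to retry later. */
}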
static DECLCALLBACK(int) doit(PVM pVM)
{
    RTPrintf(TESTCASE ": testing...\n");
    SetupSelectors(pVM);

    /*
     * Load the module and resolve the entry points.
     */
    int rc = PDMR3LdrLoadRC(pVM, NULL, "tstMicroRC.gc");
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to load tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }
    RTRCPTR RCPtrEntry;
    rc = PDMR3LdrGetSymbolRC(pVM, "tstMicroRC.gc", "tstMicroRC", &RCPtrEntry);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve the 'tstMicroRC' entry point in tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }
    RTRCPTR RCPtrStart;
    rc = PDMR3LdrGetSymbolRC(pVM, "tstMicroRC.gc", "tstMicroRCAsmStart", &RCPtrStart);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve the 'tstMicroRCAsmStart' entry point in tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }
    RTRCPTR RCPtrEnd;
    rc = PDMR3LdrGetSymbolRC(pVM, "tstMicroRC.gc", "tstMicroRCAsmEnd", &RCPtrEnd);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to resolve the 'tstMicroRCAsmEnd' entry point in tstMicroRC.gc, rc=%Rra\n", rc);
        return rc;
    }

    /*
     * Allocate and initialize the instance data.
     */
    PTSTMICRO pTst;
    rc = MMHyperAlloc(pVM, RT_ALIGN_Z(sizeof(*pTst), PAGE_SIZE), PAGE_SIZE, MM_TAG_VM, (void **)&pTst);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": Failed to allocate instance memory (%d bytes), rc=%Rra\n", sizeof(*pTst), rc);
        return rc;
    }
    pTst->RCPtr      = MMHyperR3ToRC(pVM, pTst);
    pTst->RCPtrStack = MMHyperR3ToRC(pVM, &pTst->au8Stack[sizeof(pTst->au8Stack) - 32]);

    /* the page must be writable from user mode */
    rc = PGMMapModifyPage(pVM, pTst->RCPtr, sizeof(*pTst), X86_PTE_US | X86_PTE_RW, ~(uint64_t)(X86_PTE_US | X86_PTE_RW));
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": PGMMapModifyPage -> rc=%Rra\n", rc);
        return rc;
    }

    /* all the code must be executable from R3 */
    rc = PGMMapModifyPage(pVM, RCPtrStart, RCPtrEnd - RCPtrStart + PAGE_SIZE, X86_PTE_US, ~(uint64_t)X86_PTE_US);
    if (RT_FAILURE(rc))
    {
        RTPrintf(TESTCASE ": PGMMapModifyPage -> rc=%Rra\n", rc);
        return rc;
    }
    DBGFR3PagingDumpEx(pVM->pUVM, 0 /*idCpu*/,
                       DBGFPGDMP_FLAGS_CURRENT_CR3 | DBGFPGDMP_FLAGS_CURRENT_MODE | DBGFPGDMP_FLAGS_SHADOW
                       | DBGFPGDMP_FLAGS_HEADER | DBGFPGDMP_FLAGS_PRINT_CR3,
                       0 /*cr3*/, 0 /*u64FirstAddr*/, UINT64_MAX /*u64LastAddr*/, 99 /*cMaxDepth*/, NULL);

#if 0
    /*
     * Disassemble the assembly...
     */
    RTGCPTR GCPtr = RCPtrStart;
    while (GCPtr < RCPtrEnd)
    {
        size_t cb = 0;
        char   sz[256];
        int rc = DBGFR3DisasInstrEx(pVM, CPUMGetHyperCS(pVM), GCPtr, 0, sz, sizeof(sz), &cb);
        if (RT_SUCCESS(rc))
            RTLogPrintf("%s\n", sz);
        else
        {
            RTLogPrintf("%RGv rc=%Rrc\n", GCPtr, rc);
            cb = 1;
        }
        GCPtr += cb;
    }
#endif

#ifdef VBOX_WITH_RAW_MODE
    /*
     * Do the profiling.
     */
    /* execute the instruction profiling tests */
    PrintHeaderInstr();
    int i;
    for (i = TSTMICROTEST_OVERHEAD; i < TSTMICROTEST_TRAP_FIRST; i++)
    {
        TSTMICROTEST enmTest = (TSTMICROTEST)i;
        uint64_t cMin = ~0;
        uint64_t cMax = 0;
        uint64_t cTotal = 0;
        unsigned cSamples = 0;
        rc = VINF_SUCCESS;
        for (int c = 0; c < 100; c++)
        {
            int rc2 = VMMR3CallRC(pVM, RCPtrEntry, 2, pTst->RCPtr, enmTest);
            if (RT_SUCCESS(rc2))
            {
                uint64_t u64 = pTst->aResults[enmTest].cTotalTicks;
                if (cMin > u64)
                    cMin = u64;
                if (cMax < u64)
                    cMax = u64;
                cTotal += u64;
                cSamples++;
            }
            else if (RT_SUCCESS(rc))
                rc = rc2;
        }
        uint64_t cAvg = cTotal / (cSamples ? cSamples : 1);
        pTst->aResults[enmTest].cTotalTicks = cAvg;
        PrintResultInstr(pTst, enmTest, rc, cMin, cAvg, cMax);

        /* store the overhead */
        if (enmTest == TSTMICROTEST_OVERHEAD)
            pTst->u64Overhead = cMin;
    }
#endif

#ifdef VBOX_WITH_RAW_MODE
    /* execute the trap/cycle profiling tests.
*/ RTPrintf("\n"); PrintHeaderTraps(); /* don't disable rdtsc in R1/R2/R3! */ CPUMR3SetCR4Feature(pVM, 0, ~X86_CR4_TSD); for (i = TSTMICROTEST_TRAP_FIRST; i < TSTMICROTEST_MAX; i++) { TSTMICROTEST enmTest = (TSTMICROTEST)i; rc = VMMR3CallRC(pVM, RCPtrEntry, 2, pTst->RCPtr, enmTest); PrintResultTrap(pTst, enmTest, rc); } #endif RTPrintf(TESTCASE ": done!\n"); return VINF_SUCCESS; }
int MsixInit(PCPDMPCIHLP pPciHlp, PPCIDEVICE pDev, PPDMMSIREG pMsiReg)
{
    if (pMsiReg->cMsixVectors == 0)
        return VINF_SUCCESS;

    /* We cannot init MSI-X on raw devices yet. */
    Assert(!pciDevIsPassthrough(pDev));

    uint16_t cVectors    = pMsiReg->cMsixVectors;
    uint8_t  iCapOffset  = pMsiReg->iMsixCapOffset;
    uint8_t  iNextOffset = pMsiReg->iMsixNextOffset;
    uint8_t  iBar        = pMsiReg->iMsixBar;

    if (cVectors > VBOX_MSIX_MAX_ENTRIES)
    {
        AssertMsgFailed(("Too many MSI-X vectors: %d\n", cVectors));
        return VERR_TOO_MUCH_DATA;
    }
    if (iBar > 5)
    {
        AssertMsgFailed(("Using wrong BAR for MSI-X: %d\n", iBar));
        return VERR_INVALID_PARAMETER;
    }

    Assert(iCapOffset != 0 && iCapOffset < 0xff && iNextOffset < 0xff);

    int rc = VINF_SUCCESS;

    /* If device is passthrough, BAR is registered using common mechanism. */
    if (!pciDevIsPassthrough(pDev))
    {
        rc = PDMDevHlpPCIIORegionRegister(pDev->pDevIns, iBar, 0x1000, PCI_ADDRESS_SPACE_MEM, msixMap);
        if (RT_FAILURE(rc))
            return rc;
    }

    pDev->Int.s.u8MsixCapOffset = iCapOffset;
    pDev->Int.s.u8MsixCapSize   = VBOX_MSIX_CAP_SIZE;
    PVM pVM = PDMDevHlpGetVM(pDev->pDevIns);
    pDev->Int.s.pMsixPageR3     = NULL;

    rc = MMHyperAlloc(pVM, 0x1000, 1, MM_TAG_PDM_DEVICE_USER, (void **)&pDev->Int.s.pMsixPageR3);
    if (RT_FAILURE(rc) || pDev->Int.s.pMsixPageR3 == NULL)
        return VERR_NO_VM_MEMORY;
    RT_BZERO(pDev->Int.s.pMsixPageR3, 0x1000);
    pDev->Int.s.pMsixPageR0 = MMHyperR3ToR0(pVM, pDev->Int.s.pMsixPageR3);
    pDev->Int.s.pMsixPageRC = MMHyperR3ToRC(pVM, pDev->Int.s.pMsixPageR3);

    /* R3 PCI helper */
    pDev->Int.s.pPciBusPtrR3 = pPciHlp;

    PCIDevSetByte(pDev,  iCapOffset + 0, VBOX_PCI_CAP_ID_MSIX);
    PCIDevSetByte(pDev,  iCapOffset + 1, iNextOffset); /* next */
    PCIDevSetWord(pDev,  iCapOffset + VBOX_MSIX_CAP_MESSAGE_CONTROL, cVectors - 1);

    uint32_t offTable = 0, offPBA = 0x800;
    PCIDevSetDWord(pDev, iCapOffset + VBOX_MSIX_TABLE_BIROFFSET, offTable | iBar);
    PCIDevSetDWord(pDev, iCapOffset + VBOX_MSIX_PBA_BIROFFSET,   offPBA   | iBar);

    pciDevSetMsixCapable(pDev);

    return VINF_SUCCESS;
}
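/*
 * Decoding sketch for the MSI-X Table/PBA Offset-BIR dwords written above.
 * Per the PCI spec layout, the low 3 bits select the BAR and the remaining
 * bits hold the 8-byte-aligned offset into that BAR; the values mirror the
 * offTable/offPBA/iBar combination used in MsixInit.
 */
#include <stdint.h>
#include <stdio.h>

int main(void)
{
    uint32_t const uTableReg = 0x00000000 | 0;  /* offTable | iBar, as above */
    uint32_t const uPbaReg   = 0x00000800 | 0;  /* offPBA   | iBar, as above */
    printf("table: bar=%u off=%#x\n", uTableReg & 7, uTableReg & ~7u);
    printf("pba:   bar=%u off=%#x\n", uPbaReg   & 7, uPbaReg   & ~7u);
    return 0;
}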
/**
 * Gets the raw-mode context address of the NOP critical section.
 *
 * @returns The raw-mode context address of the NOP critical section.
 * @param   pVM     Pointer to the VM.
 */
VMMR3DECL(RCPTRTYPE(PPDMCRITSECT)) PDMR3CritSectGetNopRC(PVM pVM)
{
    VM_ASSERT_VALID_EXT_RETURN(pVM, NIL_RTRCPTR);
    return MMHyperR3ToRC(pVM, &pVM->pdm.s.NopCritSect);
}