/* Headers assumed for this test (standard IPRT / VBox support library includes). */
#include <VBox/sup.h>
#include <VBox/vmm/vm.h>
#include <VBox/vmm/vmm.h>
#include <VBox/err.h>
#include <VBox/param.h>
#include <iprt/initterm.h>
#include <iprt/stream.h>
#include <iprt/string.h>
#include <iprt/path.h>
#include <iprt/mem.h>
#include <iprt/time.h>
#include <iprt/asm-amd64-x86.h>


int main(int argc, char **argv)
{
    int rcRet = 0;
    int i;
    int rc;
    int cIterations = argc > 1 ? RTStrToUInt32(argv[1]) : 32;
    if (cIterations == 0)
        cIterations = 64;

    /*
     * Init.
     */
    RTR3InitExe(argc, &argv, 0);
    PSUPDRVSESSION pSession;
    rc = SUPR3Init(&pSession);
    rcRet += rc != 0;
    RTPrintf("tstInt: SUPR3Init -> rc=%Rrc\n", rc);
    char szFile[RTPATH_MAX];
    if (!rc)
        rc = RTPathExecDir(szFile, sizeof(szFile) - sizeof("/VMMR0.r0"));
    char szAbsFile[RTPATH_MAX];
    if (RT_SUCCESS(rc))
    {
        strcat(szFile, "/VMMR0.r0");
        rc = RTPathAbs(szFile, szAbsFile, sizeof(szAbsFile));
    }
    if (RT_SUCCESS(rc))
    {
        /*
         * Load VMM code.
         */
        rc = SUPR3LoadVMM(szAbsFile);
        if (RT_SUCCESS(rc))
        {
            /*
             * Create a fake 'VM'.
             */
            PVMR0 pVMR0 = NIL_RTR0PTR;
            PVM pVM = NULL;
            const unsigned cPages = RT_ALIGN_Z(sizeof(*pVM), PAGE_SIZE) >> PAGE_SHIFT;
            PSUPPAGE paPages = (PSUPPAGE)RTMemAllocZ(cPages * sizeof(SUPPAGE));
            if (paPages)
                rc = SUPR3LowAlloc(cPages, (void **)&pVM, &pVMR0, &paPages[0]);
            else
                rc = VERR_NO_MEMORY;
            if (RT_SUCCESS(rc))
            {
                pVM->pVMRC = 0;
                pVM->pVMR3 = pVM;
                pVM->pVMR0 = pVMR0;
                pVM->paVMPagesR3 = paPages;
                pVM->pSession = pSession;
                pVM->enmVMState = VMSTATE_CREATED;

                rc = SUPR3SetVMForFastIOCtl(pVMR0);
                if (!rc)
                {
                    /*
                     * Call VMM code a number of times (slow-path NOP).
                     */
                    for (i = cIterations; i > 0; i--)
                    {
                        rc = SUPR3CallVMMR0(pVMR0, NIL_VMCPUID, VMMR0_DO_SLOW_NOP, NULL);
                        if (rc != VINF_SUCCESS)
                        {
                            RTPrintf("tstInt: SUPR3CallVMMR0 -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i);
                            rcRet++;
                            break;
                        }
                    }
                    RTPrintf("tstInt: Performed SUPR3CallVMMR0 %d times (rc=%Rrc)\n", cIterations, rc);

                    /*
                     * The fast path.
                     */
                    if (rc == VINF_SUCCESS)
                    {
                        RTTimeNanoTS(); /* warmup */
                        uint64_t StartTS   = RTTimeNanoTS();
                        uint64_t StartTick = ASMReadTSC();
                        uint64_t MinTicks  = UINT64_MAX;
                        for (i = 0; i < 1000000; i++)
                        {
                            uint64_t OneStartTick = ASMReadTSC();
                            rc = SUPR3CallVMMR0Fast(pVMR0, VMMR0_DO_NOP, 0);
                            uint64_t Ticks = ASMReadTSC() - OneStartTick;
                            if (Ticks < MinTicks)
                                MinTicks = Ticks;
                            if (RT_UNLIKELY(rc != VINF_SUCCESS))
                            {
                                RTPrintf("tstInt: SUPR3CallVMMR0Fast -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i);
                                rcRet++;
                                break;
                            }
                        }
                        uint64_t Ticks    = ASMReadTSC() - StartTick;
                        uint64_t NanoSecs = RTTimeNanoTS() - StartTS;
                        RTPrintf("tstInt: SUPR3CallVMMR0Fast - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);

                        /*
                         * The ordinary path.
                         */
                        RTTimeNanoTS(); /* warmup */
                        StartTS   = RTTimeNanoTS();
                        StartTick = ASMReadTSC();
                        MinTicks  = UINT64_MAX;
                        for (i = 0; i < 1000000; i++)
                        {
                            uint64_t OneStartTick = ASMReadTSC();
                            rc = SUPR3CallVMMR0Ex(pVMR0, NIL_VMCPUID, VMMR0_DO_SLOW_NOP, 0, NULL);
                            uint64_t OneTicks = ASMReadTSC() - OneStartTick;
                            if (OneTicks < MinTicks)
                                MinTicks = OneTicks;
                            if (RT_UNLIKELY(rc != VINF_SUCCESS))
                            {
                                RTPrintf("tstInt: SUPR3CallVMMR0Ex -> rc=%Rrc i=%d Expected VINF_SUCCESS!\n", rc, i);
                                rcRet++;
                                break;
                            }
                        }
                        Ticks    = ASMReadTSC() - StartTick;
                        NanoSecs = RTTimeNanoTS() - StartTS;
                        RTPrintf("tstInt: SUPR3CallVMMR0Ex - %d iterations in %llu ns / %llu ticks. %llu ns / %#llu ticks per iteration. Min %llu ticks.\n",
                                 i, NanoSecs, Ticks, NanoSecs / i, Ticks / i, MinTicks);
                    }
                }
                else
                {
                    RTPrintf("tstInt: SUPR3SetVMForFastIOCtl failed: %Rrc\n", rc);
                    rcRet++;
                }
            }
            else
            {
                RTPrintf("tstInt: SUPR3LowAlloc(%#zx,,) failed\n", sizeof(*pVM));
                rcRet++;
            }

            /*
             * Unload VMM.
             */
            rc = SUPR3UnloadVMM();
            if (rc)
            {
                RTPrintf("tstInt: SUPR3UnloadVMM failed with rc=%Rrc\n", rc);
                rcRet++;
            }
        }
        else
        {
            RTPrintf("tstInt: SUPR3LoadVMM failed with rc=%Rrc\n", rc);
            rcRet++;
        }

        /*
         * Terminate.
         */
        rc = SUPR3Term(false /*fForced*/);
        rcRet += rc != 0;
        RTPrintf("tstInt: SUPR3Term -> rc=%Rrc\n", rc);
    }

    return !!rcRet;
}
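
/*
 * Usage sketch (illustrative, not part of the original source): the first
 * command-line argument, parsed by RTStrToUInt32 above, sets the iteration
 * count for the slow-path NOP loop (default 32; an explicit 0 is bumped to
 * 64), and VMMR0.r0 must sit next to the test binary, e.g.:
 *
 *      ./tstInt 128
 */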
/**
 * Handles the KVM hypercall.
 *
 * @returns VBox status code.
 * @param   pVCpu   Pointer to the VMCPU.
 * @param   pCtx    Pointer to the guest-CPU context.
 */
VMM_INT_DECL(int) gimKvmHypercall(PVMCPU pVCpu, PCPUMCTX pCtx)
{
    /*
     * Get the hypercall operation and arguments.
     */
    bool const fIs64BitMode = CPUMIsGuestIn64BitCodeEx(pCtx);
    uint64_t uHyperOp   = pCtx->rax;
    uint64_t uHyperArg0 = pCtx->rbx;
    uint64_t uHyperArg1 = pCtx->rcx;
    uint64_t uHyperArg2 = pCtx->rdi;
    uint64_t uHyperArg3 = pCtx->rsi;
    uint64_t uHyperRet  = KVM_HYPERCALL_RET_ENOSYS;
    uint64_t uAndMask   = UINT64_C(0xffffffffffffffff);
    if (!fIs64BitMode)
    {
        uAndMask    = UINT64_C(0xffffffff);
        uHyperOp   &= UINT64_C(0xffffffff);
        uHyperArg0 &= UINT64_C(0xffffffff);
        uHyperArg1 &= UINT64_C(0xffffffff);
        uHyperArg2 &= UINT64_C(0xffffffff);
        uHyperArg3 &= UINT64_C(0xffffffff);
        uHyperRet  &= UINT64_C(0xffffffff);
    }

    /*
     * Verify that guest ring-0 is the one making the hypercall.
     */
    uint32_t uCpl = CPUMGetGuestCPL(pVCpu);
    if (uCpl)
    {
        pCtx->rax = KVM_HYPERCALL_RET_EPERM & uAndMask;
        return VINF_SUCCESS;
    }

    /*
     * Do the work.
     */
    switch (uHyperOp)
    {
        case KVM_HYPERCALL_OP_KICK_CPU:
        {
            PVM pVM = pVCpu->CTX_SUFF(pVM);
            if (uHyperArg1 < pVM->cCpus)
            {
                PVMCPU pVCpuTarget = &pVM->aCpus[uHyperArg1];   /* ASSUMES pVCpu index == ApicId of the VCPU. */
                VMCPU_FF_SET(pVCpuTarget, VMCPU_FF_UNHALT);
#ifdef IN_RING0
                /*
                 * We might be here with preemption disabled or enabled (i.e. depending on
                 * thread-context hooks being used), so don't try obtaining the GVMMR0 used
                 * lock here. See @bugref{7270#c148}.
                 */
                GVMMR0SchedWakeUpEx(pVM, pVCpuTarget->idCpu, false /* fTakeUsedLock */);
#elif defined(IN_RING3)
                int rc2 = SUPR3CallVMMR0(pVM->pVMR0, pVCpuTarget->idCpu, VMMR0_DO_GVMM_SCHED_WAKE_UP, NULL);
                AssertRC(rc2);
#elif defined(IN_RC)
                /* Nothing to do for raw-mode, shouldn't really be used by raw-mode guests anyway. */
                Assert(pVM->cCpus == 1);
#endif
                uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            }
            break;
        }

        case KVM_HYPERCALL_OP_VAPIC_POLL_IRQ:
            uHyperRet = KVM_HYPERCALL_RET_SUCCESS;
            break;

        default:
            break;
    }

    /*
     * Place the result in rax/eax.
     */
    NOREF(uHyperArg0); NOREF(uHyperArg2); NOREF(uHyperArg3); /* Not used by the ops handled above. */
    pCtx->rax = uHyperRet & uAndMask;
    return VINF_SUCCESS;
}
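
/*
 * For context, a minimal guest-side sketch (hypothetical helper, not part of
 * VBox) of how a guest reaches gimKvmHypercall above. The operation goes in
 * RAX and, for KVM_HYPERCALL_OP_KICK_CPU, the target VCPU index in RCX,
 * matching the registers the handler reads (uHyperOp from rax, uHyperArg1
 * from rcx). The handler also insists on guest CPL 0, so this must run in
 * guest ring-0. VMCALL is the Intel VT-x instruction; AMD-V guests would use
 * VMMCALL instead. This mirrors the Linux kvm_hypercall inline-asm pattern.
 */
#if 0 /* guest-side illustration only, never compiled here */
static inline uint64_t kvmGuestHypercall1(uint64_t uOp, uint64_t uArg1)
{
    uint64_t uRet;
    __asm__ __volatile__("vmcall"
                         : "=a" (uRet)      /* result comes back in rax/eax */
                         : "a" (uOp),       /* hypercall operation */
                           "c" (uArg1)      /* first argument (uHyperArg1) */
                         : "memory");
    return uRet;
}
/* e.g. kvmGuestHypercall1(KVM_HYPERCALL_OP_KICK_CPU, idTargetCpu); */
#endif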