/**
 * Enables the Hyper-V Hypercall page.
 *
 * @returns VBox status code.
 * @param   pVM                   Pointer to the VM.
 * @param   GCPhysHypercallPage   Where to map the hypercall page.
 */
VMMR3_INT_DECL(int) gimR3HvEnableHypercallPage(PVM pVM, RTGCPHYS GCPhysHypercallPage)
{
    PPDMDEVINSR3    pDevIns = pVM->gim.s.pDevInsR3;
    PGIMMMIO2REGION pRegion = &pVM->gim.s.u.Hv.aMmio2Regions[GIM_HV_HYPERCALL_PAGE_REGION_IDX];
    AssertPtrReturn(pDevIns, VERR_GIM_DEVICE_NOT_REGISTERED);

    if (pRegion->fMapped)
    {
        /*
         * Is it already enabled at the given guest-address?
         */
        if (pRegion->GCPhysPage == GCPhysHypercallPage)
            return VINF_SUCCESS;

        /*
         * If it's mapped at a different address, unmap the previous address.
         */
        int rc2 = gimR3HvDisableHypercallPage(pVM);
        AssertRC(rc2);
    }

    /*
     * Map the hypercall-page at the specified address.
     */
    Assert(!pRegion->fMapped);
    int rc = GIMR3Mmio2Map(pVM, pRegion, GCPhysHypercallPage);
    if (RT_SUCCESS(rc))
    {
        Assert(pRegion->GCPhysPage == GCPhysHypercallPage);

        /*
         * Patch the hypercall-page with the native hypercall instruction(s).
         */
        size_t cbWritten = 0;
        rc = VMMPatchHypercall(pVM, pRegion->pvPageR3, PAGE_SIZE, &cbWritten);
        if (   RT_SUCCESS(rc)
            && cbWritten < PAGE_SIZE)
        {
            /* Terminate the patched sequence so the guest returns to its caller. */
            uint8_t *pbLast = (uint8_t *)pRegion->pvPageR3 + cbWritten;
            *pbLast = 0xc3; /* RET */

            /*
             * Notify VMM that hypercalls are now enabled for all VCPUs.
             */
            for (VMCPUID i = 0; i < pVM->cCpus; i++)
                VMMHypercallsEnable(&pVM->aCpus[i]);

            LogRel(("GIM: HyperV: Enabled hypercalls at %#RGp\n", GCPhysHypercallPage));
            return VINF_SUCCESS;
        }

        /* Patching failed (or consumed the whole page, leaving no room for the RET). */
        if (rc == VINF_SUCCESS)
            rc = VERR_GIM_OPERATION_FAILED;
        /* Note: cbWritten is size_t; cast to match the %u format specifier. */
        LogRel(("GIM: HyperV: VMMPatchHypercall failed. rc=%Rrc cbWritten=%u\n", rc, (unsigned)cbWritten));

        GIMR3Mmio2Unmap(pVM, pRegion);
        return rc; /* Don't fall through to the map-failure log below. */
    }

    LogRel(("GIM: HyperV: GIMR3Mmio2Map failed. rc=%Rrc\n", rc));
    return rc;
}
/** * Initializes the KVM GIM provider. * * @returns VBox status code. * @param pVM Pointer to the VM. * @param uVersion The interface version this VM should use. */ VMMR3_INT_DECL(int) gimR3KvmInit(PVM pVM) { AssertReturn(pVM, VERR_INVALID_PARAMETER); AssertReturn(pVM->gim.s.enmProviderId == GIMPROVIDERID_KVM, VERR_INTERNAL_ERROR_5); int rc; PGIMKVM pKvm = &pVM->gim.s.u.Kvm; /* * Determine interface capabilities based on the version. */ if (!pVM->gim.s.u32Version) { /* Basic features. */ pKvm->uBaseFeat = 0 | GIM_KVM_BASE_FEAT_CLOCK_OLD //| GIM_KVM_BASE_FEAT_NOP_IO_DELAY //| GIM_KVM_BASE_FEAT_MMU_OP | GIM_KVM_BASE_FEAT_CLOCK //| GIM_KVM_BASE_FEAT_ASYNC_PF //| GIM_KVM_BASE_FEAT_STEAL_TIME //| GIM_KVM_BASE_FEAT_PV_EOI | GIM_KVM_BASE_FEAT_PV_UNHALT ; /* Rest of the features are determined in gimR3KvmInitCompleted(). */ } /* * Expose HVP (Hypervisor Present) bit to the guest. */ CPUMSetGuestCpuIdFeature(pVM, CPUMCPUIDFEATURE_HVP); /* * Modify the standard hypervisor leaves for KVM. */ CPUMCPUIDLEAF HyperLeaf; RT_ZERO(HyperLeaf); HyperLeaf.uLeaf = UINT32_C(0x40000000); HyperLeaf.uEax = UINT32_C(0x40000001); /* Minimum value for KVM is 0x40000001. */ HyperLeaf.uEbx = 0x4B4D564B; /* 'KVMK' */ HyperLeaf.uEcx = 0x564B4D56; /* 'VMKV' */ HyperLeaf.uEdx = 0x0000004D; /* 'M000' */ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf); AssertLogRelRCReturn(rc, rc); /* * Add KVM specific leaves. */ HyperLeaf.uLeaf = UINT32_C(0x40000001); HyperLeaf.uEax = pKvm->uBaseFeat; HyperLeaf.uEbx = 0; /* Reserved */ HyperLeaf.uEcx = 0; /* Reserved */ HyperLeaf.uEdx = 0; /* Reserved */ rc = CPUMR3CpuIdInsert(pVM, &HyperLeaf); AssertLogRelRCReturn(rc, rc); /* * Insert all MSR ranges of KVM. */ for (unsigned i = 0; i < RT_ELEMENTS(g_aMsrRanges_Kvm); i++) { rc = CPUMR3MsrRangesInsert(pVM, &g_aMsrRanges_Kvm[i]); AssertLogRelRCReturn(rc, rc); } /* * Setup hypercall and #UD handling. 
*/ for (VMCPUID i = 0; i < pVM->cCpus; i++) VMMHypercallsEnable(&pVM->aCpus[i]); if (ASMIsAmdCpu()) { pKvm->fTrapXcptUD = true; pKvm->uOpCodeNative = OP_VMMCALL; } else { Assert(ASMIsIntelCpu() || ASMIsViaCentaurCpu()); pKvm->fTrapXcptUD = false; pKvm->uOpCodeNative = OP_VMCALL; } /* We always need to trap VMCALL/VMMCALL hypercall using #UDs for raw-mode VMs. */ if (!HMIsEnabled(pVM)) pKvm->fTrapXcptUD = true; return VINF_SUCCESS; }