Example #1
/// <summary>
/// Enable EPT for CPU
/// </summary>
/// <param name="PML4">PML4 pointer to use</param>
VOID EptEnable( IN PEPT_PML4_ENTRY PML4 )
{
    VMX_CPU_BASED_CONTROLS primary = { 0 };
    VMX_SECONDARY_CPU_BASED_CONTROLS secondary = { 0 };
    EPT_TABLE_POINTER EPTP = { 0 };

    __vmx_vmread( SECONDARY_VM_EXEC_CONTROL, (size_t*)&secondary.All );
    __vmx_vmread( CPU_BASED_VM_EXEC_CONTROL, (size_t*)&primary.All );

    // Set up the EPTP
    EPTP.Fields.PhysAddr = MmGetPhysicalAddress( PML4 ).QuadPart >> 12;
    EPTP.Fields.PageWalkLength = 3;

    __vmx_vmwrite( EPT_POINTER, EPTP.All );
    __vmx_vmwrite( VIRTUAL_PROCESSOR_ID, VM_VPID );

    primary.Fields.ActivateSecondaryControl = TRUE;
    secondary.Fields.EnableEPT = TRUE;
    if(g_Data->VPIDSpported)
        secondary.Fields.EnableVPID = TRUE;

    __vmx_vmwrite( SECONDARY_VM_EXEC_CONTROL, secondary.All );
    __vmx_vmwrite( CPU_BASED_VM_EXEC_CONTROL, primary.All );

    // Critical step: invalidate cached EPT translations so no stale mappings survive
    EPT_CTX ctx = { 0 };
    __invept( INV_ALL_CONTEXTS, &ctx );

    //DPRINT( "HyperBone: CPU %d: %s: EPT enabled\n", CPU_NUM, __FUNCTION__ );
}
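The EPTP format written above is the architectural one from the Intel SDM. For reference, a minimal sketch of the EPT_TABLE_POINTER union this code appears to assume (PhysAddr and PageWalkLength match the usage above; the remaining field names are assumptions):

typedef union _EPT_TABLE_POINTER
{
    ULONG64 All;
    struct
    {
        ULONG64 MemoryType : 3;         // EPT paging-structure memory type (0 = UC, 6 = WB)
        ULONG64 PageWalkLength : 3;     // page-walk length minus one (3 => 4-level)
        ULONG64 EnableAccessDirty : 1;  // accessed/dirty flag support (assumed name)
        ULONG64 Reserved1 : 5;
        ULONG64 PhysAddr : 36;          // physical address of the EPT PML4, bits 51:12
        ULONG64 Reserved2 : 16;
    } Fields;
} EPT_TABLE_POINTER, *PEPT_TABLE_POINTER;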
Example #2
/// <summary>
/// Disable EPT for CPU
/// </summary>
VOID EptDisable()
{
    VMX_SECONDARY_CPU_BASED_CONTROLS secondary = { 0 };
    __vmx_vmread( SECONDARY_VM_EXEC_CONTROL, (size_t*)&secondary.All );

    secondary.Fields.EnableEPT  = FALSE;
    secondary.Fields.EnableVPID = FALSE;

    __vmx_vmwrite( SECONDARY_VM_EXEC_CONTROL, secondary.All );

    // Clear out the EPTP
    __vmx_vmwrite( EPT_POINTER, 0 );
}
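Stale guest-physical translations can outlive the control bit; if that matters for the use case, the same all-context INVEPT from Example #1 can follow the write (a sketch reusing that example's types):

    // Optional: flush all EPT-derived translations, mirroring EptEnable
    EPT_CTX ctx = { 0 };
    __invept( INV_ALL_CONTEXTS, &ctx );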
Example #3
/// <summary>
/// Inject interrupt or exception into guest
/// </summary>
/// <param name="InterruptType">INterrupt type</param>
/// <param name="Vector">IDT index</param>
/// <param name="WriteLength">Intruction length skip</param>
VOID VmxInjectEvent( INTERRUPT_TYPE InterruptType, VECTOR_EXCEPTION Vector, ULONG WriteLength )
{   
    INTERRUPT_INJECT_INFO_FIELD InjectEvent = { 0 };

    InjectEvent.Fields.Vector = Vector;
    InjectEvent.Fields.Type = InterruptType;
    InjectEvent.Fields.DeliverErrorCode = 0;
    InjectEvent.Fields.Valid = 1;

    __vmx_vmwrite( VM_ENTRY_INTR_INFO_FIELD, InjectEvent.All );
    if (WriteLength > 0)
        __vmx_vmwrite( VM_ENTRY_INSTRUCTION_LEN, WriteLength );
}
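A hypothetical call site, reflecting an intercepted instruction back to the guest as an invalid-opcode fault (the enum constants are illustrative; the project's actual names may differ):

    // Inject #UD (vector 6); a length of 0 leaves the guest RIP untouched
    VmxInjectEvent( INTERRUPT_HARDWARE_EXCEPTION, VECTOR_INVALID_OPCODE_EXCEPTION, 0 );

Note that this helper hardwires DeliverErrorCode to 0, so it only suits vectors that do not push an error code.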
Example #4
VOID
ShvVmxHandleVmx (
    _In_ PSHV_VP_STATE VpState
    )
{
    //
    // Set the CF flag, which is how VMX instructions indicate failure
    //
    VpState->GuestEFlags |= 0x1; // VM_FAIL_INVALID

    //
    // RFLAGS is actually restored from the VMCS, so update it here
    //
    __vmx_vmwrite(GUEST_RFLAGS, VpState->GuestEFlags);
}
Example #5
VOID
ShvVmxHandleExit (
    _In_ PSHV_VP_STATE VpState
    )
{
    //
    // This is the generic VM-Exit handler. Decode the reason for the exit and
    // call the appropriate handler. As per Intel specifications, given that we
    // have requested no optional exits whatsoever, we should only see CPUID,
    // INVD, XSETBV and the VMX instructions themselves. GETSEC cannot happen
    // as we do not run in SMX context.
    //
    switch (VpState->ExitReason)
    {
    case EXIT_REASON_CPUID:
        ShvVmxHandleCpuid(VpState);
        break;
    case EXIT_REASON_INVD:
        ShvVmxHandleInvd();
        break;
    case EXIT_REASON_XSETBV:
        ShvVmxHandleXsetbv(VpState);
        break;
    case EXIT_REASON_VMCALL:
    case EXIT_REASON_VMCLEAR:
    case EXIT_REASON_VMLAUNCH:
    case EXIT_REASON_VMPTRLD:
    case EXIT_REASON_VMPTRST:
    case EXIT_REASON_VMREAD:
    case EXIT_REASON_VMRESUME:
    case EXIT_REASON_VMWRITE:
    case EXIT_REASON_VMXOFF:
    case EXIT_REASON_VMXON:
        ShvVmxHandleVmx(VpState);
        break;
    default:
        NT_ASSERT(FALSE);
        break;
    }

    //
    // Move the instruction pointer to the next instruction after the one that
    // caused the exit. Since we are not doing any special handling or changing
    // of execution, this can be done for any exit reason.
    //
    VpState->GuestRip += ShvVmxRead(VM_EXIT_INSTRUCTION_LEN);
    __vmx_vmwrite(GUEST_RIP, VpState->GuestRip);
}
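Of the leaf handlers dispatched above, the INVD one is the simplest; a sketch consistent with SimpleVisor's approach (WBINVD is executed on the guest's behalf, as no __invd intrinsic is available):

VOID
ShvVmxHandleInvd (
    VOID
    )
{
    //
    // Write back and invalidate the caches on the guest's behalf.
    //
    __wbinvd();
}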
Example #6
_Use_decl_annotations_ EXTERN_C static bool VminitpSetupVMCS(
    const PER_PROCESSOR_DATA *ProcessorData, ULONG_PTR GuestStackPointer,
    ULONG_PTR GuestInstructionPointer, ULONG_PTR VmmStackPointer) {
  unsigned char error = 0;

  GDTR gdtr = {};
  __sgdt(&gdtr);

  IDTR idtr = {};
  __sidt(&idtr);

  VMX_VM_ENTER_CONTROLS vmEnterCtlRequested = {};
  vmEnterCtlRequested.Fields.IA32eModeGuest = true;
  VMX_VM_ENTER_CONTROLS vmEnterCtl = {
      VminitpAdjustControlValue(IA32_VMX_ENTRY_CTLS, vmEnterCtlRequested.All)};

  VMX_VM_EXIT_CONTROLS vmExitCtlRequested = {};
  vmExitCtlRequested.Fields.AcknowledgeInterruptOnExit = true;
  vmExitCtlRequested.Fields.HostAddressSpaceSize = true;
  VMX_VM_EXIT_CONTROLS vmExitCtl = {
      VminitpAdjustControlValue(IA32_VMX_EXIT_CTLS, vmExitCtlRequested.All)};

  VMX_PIN_BASED_CONTROLS vmPinCtlRequested = {};
  VMX_PIN_BASED_CONTROLS vmPinCtl = {
      VminitpAdjustControlValue(IA32_VMX_PINBASED_CTLS, vmPinCtlRequested.All)};

  VMX_CPU_BASED_CONTROLS vmCpuCtlRequested = {};
  vmCpuCtlRequested.Fields.RDTSCExiting = true;
  vmCpuCtlRequested.Fields.CR3LoadExiting = true;  // MOV to CR3
  vmCpuCtlRequested.Fields.CR8LoadExiting = true;  // MOV to CR8
  vmCpuCtlRequested.Fields.MovDRExiting = true;
  vmCpuCtlRequested.Fields.UseMSRBitmaps = true;
  vmCpuCtlRequested.Fields.ActivateSecondaryControl = true;
  VMX_CPU_BASED_CONTROLS vmCpuCtl = {VminitpAdjustControlValue(
      IA32_VMX_PROCBASED_CTLS, vmCpuCtlRequested.All)};

  VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2Requested = {};
  vmCpuCtl2Requested.Fields.EnableRDTSCP = true;
  vmCpuCtl2Requested.Fields.DescriptorTableExiting = true;
  VMX_CPU_BASED_CONTROLS vmCpuCtl2 = {VminitpAdjustControlValue(
      IA32_VMX_PROCBASED_CTLS2, vmCpuCtl2Requested.All)};

  // Set up the MSR bitmap

  // Activate VM-exit for RDMSR against all MSRs
  const auto bitMapReadLow =
      reinterpret_cast<UCHAR *>(ProcessorData->MsrBitmap);
  const auto bitMapReadHigh = bitMapReadLow + 1024;
  RtlFillMemory(bitMapReadLow, 1024, 0xff);   // read        0 -     1fff
  RtlFillMemory(bitMapReadHigh, 1024, 0xff);  // read c0000000 - c0001fff

  // But ignore IA32_MPERF (000000e7) and IA32_APERF (000000e8)
  RTL_BITMAP bitMapReadLowHeader = {};
  RtlInitializeBitMap(&bitMapReadLowHeader,
                      reinterpret_cast<PULONG>(bitMapReadLow), 1024 * 8);
  RtlClearBits(&bitMapReadLowHeader, 0xe7, 2);

  // But ignore IA32_GS_BASE (c0000101) and IA32_KERNEL_GS_BASE (c0000102)
  RTL_BITMAP bitMapReadHighHeader = {};
  RtlInitializeBitMap(&bitMapReadHighHeader,
                      reinterpret_cast<PULONG>(bitMapReadHigh), 1024 * 8);
  RtlClearBits(&bitMapReadHighHeader, 0x101, 2);

  const auto msrBitmapPA = MmGetPhysicalAddress(ProcessorData->MsrBitmap);

  // Set up CR0 and CR4 bitmaps

  // Where a bit is     masked, the shadow bit appears
  // Where a bit is not masked, the actual bit appears
  CR0_REG cr0mask = {};
  cr0mask.Fields.WP = true;
  CR4_REG cr4mask = {};
  cr4mask.Fields.PGE = true;

  // clang-format off
  /* 16-Bit Control Field */

  /* 16-Bit Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_ES_SELECTOR, AsmReadES());
  error |= __vmx_vmwrite(GUEST_CS_SELECTOR, AsmReadCS());
  error |= __vmx_vmwrite(GUEST_SS_SELECTOR, AsmReadSS());
  error |= __vmx_vmwrite(GUEST_DS_SELECTOR, AsmReadDS());
  error |= __vmx_vmwrite(GUEST_FS_SELECTOR, AsmReadFS());
  error |= __vmx_vmwrite(GUEST_GS_SELECTOR, AsmReadGS());
  error |= __vmx_vmwrite(GUEST_LDTR_SELECTOR, AsmReadLDTR());
  error |= __vmx_vmwrite(GUEST_TR_SELECTOR, AsmReadTR());

  /* 16-Bit Host-State Fields */
  error |= __vmx_vmwrite(HOST_ES_SELECTOR, AsmReadES() & 0xf8); // RPL and TI 
  error |= __vmx_vmwrite(HOST_CS_SELECTOR, AsmReadCS() & 0xf8); // have to be 0
  error |= __vmx_vmwrite(HOST_SS_SELECTOR, AsmReadSS() & 0xf8);
  error |= __vmx_vmwrite(HOST_DS_SELECTOR, AsmReadDS() & 0xf8);
  error |= __vmx_vmwrite(HOST_FS_SELECTOR, AsmReadFS() & 0xf8);
  error |= __vmx_vmwrite(HOST_GS_SELECTOR, AsmReadGS() & 0xf8);
  error |= __vmx_vmwrite(HOST_TR_SELECTOR, AsmReadTR() & 0xf8);

  /* 64-Bit Control Fields */
  error |= __vmx_vmwrite(IO_BITMAP_A, 0);
  error |= __vmx_vmwrite(IO_BITMAP_B, 0);
  error |= __vmx_vmwrite(MSR_BITMAP, msrBitmapPA.QuadPart);
  error |= __vmx_vmwrite(TSC_OFFSET, 0);

  /* 64-Bit Guest-State Fields */
  error |= __vmx_vmwrite(VMCS_LINK_POINTER, 0xffffffffffffffff);
  error |= __vmx_vmwrite(GUEST_IA32_DEBUGCTL, __readmsr(IA32_DEBUGCTL));

  /* 32-Bit Control Fields */
  error |= __vmx_vmwrite(PIN_BASED_VM_EXEC_CONTROL, vmPinCtl.All);
  error |= __vmx_vmwrite(CPU_BASED_VM_EXEC_CONTROL, vmCpuCtl.All);
  error |= __vmx_vmwrite(SECONDARY_VM_EXEC_CONTROL, vmCpuCtl2.All);
  error |= __vmx_vmwrite(EXCEPTION_BITMAP, 0);
  error |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
  error |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
  error |= __vmx_vmwrite(CR3_TARGET_COUNT, 0);
  error |= __vmx_vmwrite(VM_EXIT_CONTROLS, vmExitCtl.All);
  error |= __vmx_vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
  error |= __vmx_vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
  error |= __vmx_vmwrite(VM_ENTRY_CONTROLS, vmEnterCtl.All);
  error |= __vmx_vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
  error |= __vmx_vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);

  /* 32-Bit Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_ES_LIMIT, GetSegmentLimit(AsmReadES()));
  error |= __vmx_vmwrite(GUEST_CS_LIMIT, GetSegmentLimit(AsmReadCS()));
  error |= __vmx_vmwrite(GUEST_SS_LIMIT, GetSegmentLimit(AsmReadSS()));
  error |= __vmx_vmwrite(GUEST_DS_LIMIT, GetSegmentLimit(AsmReadDS()));
  error |= __vmx_vmwrite(GUEST_FS_LIMIT, GetSegmentLimit(AsmReadFS()));
  error |= __vmx_vmwrite(GUEST_GS_LIMIT, GetSegmentLimit(AsmReadGS()));
  error |= __vmx_vmwrite(GUEST_LDTR_LIMIT, GetSegmentLimit(AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_LIMIT, GetSegmentLimit(AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_GDTR_LIMIT, gdtr.Limit);
  error |= __vmx_vmwrite(GUEST_IDTR_LIMIT, idtr.Limit);
  error |= __vmx_vmwrite(GUEST_ES_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadES()));
  error |= __vmx_vmwrite(GUEST_CS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadCS()));
  error |= __vmx_vmwrite(GUEST_SS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadSS()));
  error |= __vmx_vmwrite(GUEST_DS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadDS()));
  error |= __vmx_vmwrite(GUEST_FS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadFS()));
  error |= __vmx_vmwrite(GUEST_GS_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadGS()));
  error |= __vmx_vmwrite(GUEST_LDTR_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_AR_BYTES, VminitpGetSegmentAccessRight(AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
  error |= __vmx_vmwrite(GUEST_ACTIVITY_STATE, 0);
  error |= __vmx_vmwrite(GUEST_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));

  /* 32-Bit Host-State Field */
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_CS, __readmsr(IA32_SYSENTER_CS));

  /* Natural-Width Control Fields */
  error |= __vmx_vmwrite(CR0_GUEST_HOST_MASK, cr0mask.All);
  error |= __vmx_vmwrite(CR4_GUEST_HOST_MASK, cr4mask.All);
  error |= __vmx_vmwrite(CR0_READ_SHADOW, __readcr0());
  error |= __vmx_vmwrite(CR4_READ_SHADOW, __readcr4());
  error |= __vmx_vmwrite(CR3_TARGET_VALUE0, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE1, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE2, 0);
  error |= __vmx_vmwrite(CR3_TARGET_VALUE3, 0);

  /* Natural-Width Guest-State Fields */
  error |= __vmx_vmwrite(GUEST_CR0, __readcr0());
  error |= __vmx_vmwrite(GUEST_CR3, __readcr3());
  error |= __vmx_vmwrite(GUEST_CR4, __readcr4());
  error |= __vmx_vmwrite(GUEST_ES_BASE, 0);
  error |= __vmx_vmwrite(GUEST_CS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_SS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_DS_BASE, 0);
  error |= __vmx_vmwrite(GUEST_FS_BASE, __readmsr(IA32_FS_BASE));
  error |= __vmx_vmwrite(GUEST_GS_BASE, __readmsr(IA32_GS_BASE));
  error |= __vmx_vmwrite(GUEST_LDTR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadLDTR()));
  error |= __vmx_vmwrite(GUEST_TR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadTR()));
  error |= __vmx_vmwrite(GUEST_GDTR_BASE, gdtr.Address);
  error |= __vmx_vmwrite(GUEST_IDTR_BASE, idtr.Address);
  error |= __vmx_vmwrite(GUEST_DR7, __readdr(7));
  error |= __vmx_vmwrite(GUEST_RSP, GuestStackPointer);
  error |= __vmx_vmwrite(GUEST_RIP, GuestInstructionPointer);
  error |= __vmx_vmwrite(GUEST_RFLAGS, __readeflags());
  error |= __vmx_vmwrite(GUEST_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
  error |= __vmx_vmwrite(GUEST_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));

  /* Natural-Width Host-State Fields */
  error |= __vmx_vmwrite(HOST_CR0, __readcr0());
  error |= __vmx_vmwrite(HOST_CR3, __readcr3());
  error |= __vmx_vmwrite(HOST_CR4, __readcr4());
  error |= __vmx_vmwrite(HOST_FS_BASE, __readmsr(IA32_FS_BASE));
  error |= __vmx_vmwrite(HOST_GS_BASE, __readmsr(IA32_GS_BASE));
  error |= __vmx_vmwrite(HOST_TR_BASE, VminitpGetSegmentBase(gdtr.Address, AsmReadTR()));
  error |= __vmx_vmwrite(HOST_GDTR_BASE, gdtr.Address);
  error |= __vmx_vmwrite(HOST_IDTR_BASE, idtr.Address);
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_ESP, __readmsr(IA32_SYSENTER_ESP));
  error |= __vmx_vmwrite(HOST_IA32_SYSENTER_EIP, __readmsr(IA32_SYSENTER_EIP));
  error |= __vmx_vmwrite(HOST_RSP, VmmStackPointer);
  error |= __vmx_vmwrite(HOST_RIP, reinterpret_cast<size_t>(AsmVmmEntryPoint));
  // clang-format on

  const auto vmxStatus = static_cast<VMX_STATUS>(error);
  return vmxStatus == VMX_OK;
}
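VminitpAdjustControlValue is not shown in this excerpt. A minimal sketch of the standard adjustment it presumably performs: per the Intel SDM, the low 32 bits of each capability MSR are the must-be-one settings and the high 32 bits are the may-be-one settings.

// Sketch (assumption): fold a VMX capability MSR's allowed-0/allowed-1
// settings into a requested control value.
static ULONG VminitpAdjustControlValue(ULONG Msr, ULONG RequestedValue) {
  LARGE_INTEGER msrValue = {};
  msrValue.QuadPart = __readmsr(Msr);
  ULONG adjustedValue = RequestedValue;
  adjustedValue &= msrValue.HighPart;  // clear bits that may not be 1
  adjustedValue |= msrValue.LowPart;   // set bits that must be 1
  return adjustedValue;
}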
Example #7
/// <summary>
/// Setup VMCS fields
/// </summary>
/// <param name="VpData">Virtual CPU data</param>
VOID VmxSetupVMCS( IN PVCPU VpData )
{
    PKPROCESSOR_STATE state = &VpData->HostState;
    VMX_GDTENTRY64 vmxGdtEntry = { 0 };
    VMX_VM_ENTER_CONTROLS vmEnterCtlRequested = { 0 };
    VMX_VM_EXIT_CONTROLS vmExitCtlRequested = { 0 };
    VMX_PIN_BASED_CONTROLS vmPinCtlRequested = { 0 };
    VMX_CPU_BASED_CONTROLS vmCpuCtlRequested = { 0 };
    VMX_SECONDARY_CPU_BASED_CONTROLS vmCpuCtl2Requested = { 0 };

    // As we exit back into the guest, make sure to exit in x64 mode as well.
    vmEnterCtlRequested.Fields.IA32eModeGuest = TRUE;

    // If any interrupts were pending upon entering the hypervisor, acknowledge
    // them when we're done. And make sure the host always runs in x64 mode.
    vmExitCtlRequested.Fields.AcknowledgeInterruptOnExit = TRUE;
    vmExitCtlRequested.Fields.HostAddressSpaceSize = TRUE;

    // For our choice below of supporting RDTSCP and XSAVES/XRSTORS to actually
    // mean something, we have to request secondary controls. We also want to
    // activate the MSR bitmap to keep MSR accesses from being intercepted.
    vmCpuCtlRequested.Fields.UseMSRBitmaps = TRUE;
    vmCpuCtlRequested.Fields.ActivateSecondaryControl = TRUE;
    //vmCpuCtlRequested.Fields.UseTSCOffseting = TRUE;
    //vmCpuCtlRequested.Fields.RDTSCExiting = TRUE;

    // VPID caches must be invalidated on CR3 change
    if(g_Data->Features.VPID)
        vmCpuCtlRequested.Fields.CR3LoadExiting = TRUE;

    // Enable support for RDTSCP and XSAVES/XRSTORS in the guest. Windows 10
    // makes use of both of these instructions if the CPU supports them. By
    // using VmxpAdjustMsr, these options will be ignored if this processor
    // does not actually support the instructions to begin with.
    vmCpuCtl2Requested.Fields.EnableRDTSCP = TRUE;
    vmCpuCtl2Requested.Fields.EnableXSAVESXSTORS = TRUE;

    // Begin by setting the link pointer to the required value for 4KB VMCS.
    __vmx_vmwrite( VMCS_LINK_POINTER, MAXULONG64 );

    __vmx_vmwrite(
        PIN_BASED_VM_EXEC_CONTROL, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_PINBASED_CTLS )], vmPinCtlRequested.All ) 
        );
    __vmx_vmwrite( 
        CPU_BASED_VM_EXEC_CONTROL, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_PROCBASED_CTLS )], vmCpuCtlRequested.All ) 
        );
    __vmx_vmwrite( 
        SECONDARY_VM_EXEC_CONTROL, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_PROCBASED_CTLS2 )], vmCpuCtl2Requested.All ) 
        );
    __vmx_vmwrite(
        VM_EXIT_CONTROLS, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_EXIT_CTLS )], vmExitCtlRequested.All ) 
        );
    __vmx_vmwrite( 
        VM_ENTRY_CONTROLS, 
        VmxpAdjustMsr( VpData->MsrData[VMX_MSR( MSR_IA32_VMX_TRUE_ENTRY_CTLS )], vmEnterCtlRequested.All ) 
        );

    // Load the MSR bitmap. Unlike other bitmaps, not having an MSR bitmap
    // traps all MSRs, so we have to allocate an empty one.
    PUCHAR bitMapReadLow = g_Data->MSRBitmap;       // 0x00000000 - 0x00001FFF
    PUCHAR bitMapReadHigh = bitMapReadLow + 1024;   // 0xC0000000 - 0xC0001FFF

    RTL_BITMAP bitMapReadLowHeader = { 0 };
    RTL_BITMAP bitMapReadHighHeader = { 0 };
    RtlInitializeBitMap( &bitMapReadLowHeader, (PULONG)bitMapReadLow, 1024 * 8 );
    RtlInitializeBitMap( &bitMapReadHighHeader, (PULONG)bitMapReadHigh, 1024 * 8 );

    RtlSetBit( &bitMapReadLowHeader, MSR_IA32_FEATURE_CONTROL );   // intercept reads of IA32_FEATURE_CONTROL
    RtlSetBit( &bitMapReadLowHeader, MSR_IA32_DEBUGCTL );          // intercept reads of IA32_DEBUGCTL
    RtlSetBit( &bitMapReadHighHeader, MSR_LSTAR - 0xC0000000 );    // intercept reads of MSR_LSTAR (c0000082)

    // VMX MSRs
    for (ULONG i = MSR_IA32_VMX_BASIC; i <= MSR_IA32_VMX_VMFUNC; i++)
        RtlSetBit( &bitMapReadLowHeader, i );

    __vmx_vmwrite( MSR_BITMAP, MmGetPhysicalAddress( g_Data->MSRBitmap ).QuadPart );

    // Exception bitmap
    ULONG ExceptionBitmap = 0;
    //ExceptionBitmap |= 1 << VECTOR_DEBUG_EXCEPTION;
    ExceptionBitmap |= 1 << VECTOR_BREAKPOINT_EXCEPTION;

    __vmx_vmwrite( EXCEPTION_BITMAP, ExceptionBitmap );

    // CS (Ring 0 Code)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegCs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_CS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_CS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_CS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_CS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_CS_SELECTOR, state->ContextFrame.SegCs & ~RPL_MASK );

    // SS (Ring 0 Data)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegSs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_SS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_SS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_SS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_SS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_SS_SELECTOR, state->ContextFrame.SegSs & ~RPL_MASK );

    // DS (Ring 3 Data)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegDs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_DS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_DS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_DS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_DS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_DS_SELECTOR, state->ContextFrame.SegDs & ~RPL_MASK );

    // ES (Ring 3 Data)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegEs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_ES_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_ES_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_ES_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_ES_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_ES_SELECTOR, state->ContextFrame.SegEs & ~RPL_MASK );

    // FS (Ring 3 Compatibility-Mode TEB)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegFs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_FS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_FS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_FS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_FS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_FS_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_FS_SELECTOR, state->ContextFrame.SegFs & ~RPL_MASK );

    // GS (Ring 3 Data if in Compatibility-Mode, MSR-based in Long Mode)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->ContextFrame.SegGs, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_GS_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_GS_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_GS_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_GS_BASE, state->SpecialRegisters.MsrGsBase );
    __vmx_vmwrite( HOST_GS_BASE, state->SpecialRegisters.MsrGsBase );
    __vmx_vmwrite( HOST_GS_SELECTOR, state->ContextFrame.SegGs & ~RPL_MASK );

    // Task Register (Ring 0 TSS)
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->SpecialRegisters.Tr, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_TR_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_TR_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_TR_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_TR_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_TR_BASE, vmxGdtEntry.Base );
    __vmx_vmwrite( HOST_TR_SELECTOR, state->SpecialRegisters.Tr & ~RPL_MASK );

    // LDT
    VmxpConvertGdtEntry( state->SpecialRegisters.Gdtr.Base, state->SpecialRegisters.Ldtr, &vmxGdtEntry );
    __vmx_vmwrite( GUEST_LDTR_SELECTOR, vmxGdtEntry.Selector );
    __vmx_vmwrite( GUEST_LDTR_LIMIT, vmxGdtEntry.Limit );
    __vmx_vmwrite( GUEST_LDTR_AR_BYTES, vmxGdtEntry.AccessRights );
    __vmx_vmwrite( GUEST_LDTR_BASE, vmxGdtEntry.Base );

    // GDT
    __vmx_vmwrite( GUEST_GDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Gdtr.Base );
    __vmx_vmwrite( GUEST_GDTR_LIMIT, state->SpecialRegisters.Gdtr.Limit );
    __vmx_vmwrite( HOST_GDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Gdtr.Base );

    // IDT
    __vmx_vmwrite( GUEST_IDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Idtr.Base );
    __vmx_vmwrite( GUEST_IDTR_LIMIT, state->SpecialRegisters.Idtr.Limit );
    __vmx_vmwrite( HOST_IDTR_BASE, (ULONG_PTR)state->SpecialRegisters.Idtr.Base );

    // CR0
    __vmx_vmwrite( CR0_READ_SHADOW, state->SpecialRegisters.Cr0 );
    __vmx_vmwrite( HOST_CR0, state->SpecialRegisters.Cr0 );
    __vmx_vmwrite( GUEST_CR0, state->SpecialRegisters.Cr0 );

    // CR3 -- do not use the current process' address space for the host,
    // because we may be executing in an arbitrary user-mode process right now
    // as part of the DPC interrupt we execute in.
    __vmx_vmwrite( HOST_CR3, VpData->SystemDirectoryTableBase );
    __vmx_vmwrite( GUEST_CR3, state->SpecialRegisters.Cr3 );

    // CR4
    __vmx_vmwrite( HOST_CR4, state->SpecialRegisters.Cr4 );
    __vmx_vmwrite( GUEST_CR4, state->SpecialRegisters.Cr4 );
    __vmx_vmwrite( CR4_GUEST_HOST_MASK, 0x2000 );                               // 0x2000 = CR4.VMXE
    __vmx_vmwrite( CR4_READ_SHADOW, state->SpecialRegisters.Cr4 & ~0x2000 );    // guest reads VMXE as 0

    // Debug MSR and DR7
    __vmx_vmwrite( GUEST_IA32_DEBUGCTL, state->SpecialRegisters.DebugControl );
    __vmx_vmwrite( GUEST_DR7, state->SpecialRegisters.KernelDr7 );

    // Finally, load the guest stack, instruction pointer, and rflags, which
    // corresponds exactly to the location where RtlCaptureContext will return
    // to inside of VmxInitializeCPU.
    __vmx_vmwrite( GUEST_RSP, state->ContextFrame.Rsp );
    __vmx_vmwrite( GUEST_RIP, state->ContextFrame.Rip );
    __vmx_vmwrite( GUEST_RFLAGS, state->ContextFrame.EFlags );

    // Load the hypervisor entrypoint and stack. We give ourselves a standard
    // size kernel stack (24KB) and bias for the context structure that the
    // hypervisor entrypoint will push on the stack, avoiding the need for RSP
    // modifying instructions in the entrypoint. Note that the CONTEXT pointer
    // and thus the stack itself, must be 16-byte aligned for ABI compatibility
    // with AMD64 -- specifically, XMM operations will fail otherwise, such as
    // the ones that RtlCaptureContext will perform.
    NT_ASSERT( (KERNEL_STACK_SIZE - sizeof( CONTEXT )) % 16 == 0 );
    __vmx_vmwrite( HOST_RSP, (ULONG_PTR)VpData->VMMStack + KERNEL_STACK_SIZE - sizeof( CONTEXT ) );
    __vmx_vmwrite( HOST_RIP, (ULONG_PTR)VmxVMEntry );
}
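For context, the launch sequence this setup supports might look like the following hedged sketch (VmxSetupVMCS is from the excerpt; the Launched flag is an illustrative name, not necessarily the project's):

    // Snapshot the state that GUEST_RIP/GUEST_RSP point back into, then
    // launch; on success the guest resumes exactly at this snapshot.
    RtlCaptureContext( &VpData->HostState.ContextFrame );
    if (!VpData->Launched)
    {
        VpData->Launched = TRUE;
        VmxSetupVMCS( VpData );
        __vmx_vmlaunch();   // does not return on success
    }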
Example #8
UCHAR vmwrite(size_t CtlCode, size_t Value)
{
    KdPrint(("vmwrite %llx, %llx\n", CtlCode, Value));
    return __vmx_vmwrite(CtlCode, Value);
}
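A hypothetical use of this tracing wrapper; like the raw intrinsic, it returns 0 on success:

    if (vmwrite(GUEST_ACTIVITY_STATE, 0) != 0)
        KdPrint(("vmwrite to GUEST_ACTIVITY_STATE failed\n"));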
Example #9
File: vcpu.c  Project: HideSand/ksm
static bool setup_vmcs(struct vcpu *vcpu, uintptr_t sp, uintptr_t ip, uintptr_t stack_base)
{
	struct gdtr gdtr;
	__sgdt(&gdtr);

	struct gdtr idtr;
	__sidt(&idtr);

	/* Get this CPU's EPT  */
	struct ept *ept = &vcpu->ept;

	u64 cr0 = __readcr0();
	u64 cr4 = __readcr4();
	u64 err = 0;

	u16 es = __reades();
	u16 cs = __readcs();
	u16 ss = __readss();
	u16 ds = __readds();
	u16 fs = __readfs();
	u16 gs = __readgs();
	u16 ldt = __sldt();
	u16 tr = __str();

	vcpu->g_idt.base = idtr.base;
	vcpu->g_idt.limit = idtr.limit;

	struct kidt_entry64 *current = (struct kidt_entry64 *)idtr.base;
	struct kidt_entry64 *shadow = (struct kidt_entry64 *)vcpu->idt.base;
	unsigned count = idtr.limit / sizeof(*shadow);
	for (unsigned n = 0; n < count; ++n)
		memcpy(&shadow[n], &current[n], sizeof(*shadow));
	vcpu_put_idt(vcpu, cs, X86_TRAP_VE, __ept_violation);

	u8 msr_off = 0;
	/* The "true" capability MSRs live 0xC above their legacy counterparts.  */
	if (__readmsr(MSR_IA32_VMX_BASIC) & VMX_BASIC_TRUE_CTLS)
		msr_off = 0xC;

	u64 vm_entry = VM_ENTRY_IA32E_MODE;// | VM_ENTRY_LOAD_IA32_PAT;
	adjust_ctl_val(MSR_IA32_VMX_ENTRY_CTLS + msr_off, &vm_entry);

	u64 vm_exit = VM_EXIT_ACK_INTR_ON_EXIT | VM_EXIT_HOST_ADDR_SPACE_SIZE;
	adjust_ctl_val(MSR_IA32_VMX_EXIT_CTLS + msr_off, &vm_exit);

	u64 vm_pinctl = 0;
	adjust_ctl_val(MSR_IA32_VMX_PINBASED_CTLS + msr_off, &vm_pinctl);

	u64 vm_cpuctl = CPU_BASED_ACTIVATE_SECONDARY_CONTROLS | CPU_BASED_USE_MSR_BITMAPS |
		CPU_BASED_MOV_DR_EXITING | CPU_BASED_USE_TSC_OFFSETING;
	adjust_ctl_val(MSR_IA32_VMX_PROCBASED_CTLS + msr_off, &vm_cpuctl);

	u64 vm_2ndctl = SECONDARY_EXEC_ENABLE_EPT | SECONDARY_EXEC_TSC_SCALING |
		SECONDARY_EXEC_DESC_TABLE_EXITING | SECONDARY_EXEC_XSAVES | SECONDARY_EXEC_RDTSCP |
		SECONDARY_EXEC_ENABLE_VMFUNC | SECONDARY_EXEC_ENABLE_VE;
	adjust_ctl_val(MSR_IA32_VMX_PROCBASED_CTLS2, &vm_2ndctl);	/* no TRUE variant exists */

	/* Processor control fields  */
	err |= __vmx_vmwrite(PIN_BASED_VM_EXEC_CONTROL, vm_pinctl);
	err |= __vmx_vmwrite(CPU_BASED_VM_EXEC_CONTROL, vm_cpuctl);
	err |= __vmx_vmwrite(EXCEPTION_BITMAP, __EXCEPTION_BITMAP);
	err |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MASK, 0);
	err |= __vmx_vmwrite(PAGE_FAULT_ERROR_CODE_MATCH, 0);
	err |= __vmx_vmwrite(CR3_TARGET_COUNT, 0);
	err |= __vmx_vmwrite(VM_EXIT_CONTROLS, vm_exit);
	err |= __vmx_vmwrite(VM_EXIT_MSR_STORE_COUNT, 0);
	err |= __vmx_vmwrite(VM_EXIT_MSR_LOAD_COUNT, 0);
	err |= __vmx_vmwrite(VM_ENTRY_CONTROLS, vm_entry);
	err |= __vmx_vmwrite(VM_ENTRY_MSR_LOAD_COUNT, 0);
	err |= __vmx_vmwrite(VM_ENTRY_INTR_INFO_FIELD, 0);
	err |= __vmx_vmwrite(SECONDARY_VM_EXEC_CONTROL, vm_2ndctl);

	/* Control Fields */
	err |= __vmx_vmwrite(IO_BITMAP_A, 0);
	err |= __vmx_vmwrite(IO_BITMAP_B, 0);
	err |= __vmx_vmwrite(MSR_BITMAP, __pa(ksm.msr_bitmap));
	err |= __vmx_vmwrite(EPT_POINTER, EPTP(ept, EPTP_DEFAULT));
	err |= __vmx_vmwrite(VM_FUNCTION_CTRL, VM_FUNCTION_CTL_EPTP_SWITCHING);
	err |= __vmx_vmwrite(EPTP_INDEX, EPTP_DEFAULT);
	err |= __vmx_vmwrite(EPTP_LIST_ADDRESS, __pa(ept->ptr_list));
	err |= __vmx_vmwrite(VE_INFO_ADDRESS, __pa(vcpu->ve));
	err |= __vmx_vmwrite(CR0_GUEST_HOST_MASK, __CR0_GUEST_HOST_MASK);
	err |= __vmx_vmwrite(CR4_GUEST_HOST_MASK, __CR4_GUEST_HOST_MASK);
	err |= __vmx_vmwrite(CR0_READ_SHADOW, cr0);
	err |= __vmx_vmwrite(CR4_READ_SHADOW, cr4);
	err |= __vmx_vmwrite(VMCS_LINK_POINTER, -1ULL);

	/* Guest  */
	err |= __vmx_vmwrite(GUEST_ES_SELECTOR, es);
	err |= __vmx_vmwrite(GUEST_CS_SELECTOR, cs);
	err |= __vmx_vmwrite(GUEST_SS_SELECTOR, ss);
	err |= __vmx_vmwrite(GUEST_DS_SELECTOR, ds);
	err |= __vmx_vmwrite(GUEST_FS_SELECTOR, fs);
	err |= __vmx_vmwrite(GUEST_GS_SELECTOR, gs);
	err |= __vmx_vmwrite(GUEST_LDTR_SELECTOR, ldt);
	err |= __vmx_vmwrite(GUEST_TR_SELECTOR, tr);
	err |= __vmx_vmwrite(GUEST_ES_LIMIT, __segmentlimit(es));
	err |= __vmx_vmwrite(GUEST_CS_LIMIT, __segmentlimit(cs));
	err |= __vmx_vmwrite(GUEST_SS_LIMIT, __segmentlimit(ss));
	err |= __vmx_vmwrite(GUEST_DS_LIMIT, __segmentlimit(ds));
	err |= __vmx_vmwrite(GUEST_FS_LIMIT, __segmentlimit(fs));
	err |= __vmx_vmwrite(GUEST_GS_LIMIT, __segmentlimit(gs));
	err |= __vmx_vmwrite(GUEST_LDTR_LIMIT, __segmentlimit(ldt));
	err |= __vmx_vmwrite(GUEST_TR_LIMIT, __segmentlimit(tr));
	err |= __vmx_vmwrite(GUEST_GDTR_LIMIT, gdtr.limit);
	err |= __vmx_vmwrite(GUEST_IDTR_LIMIT, idtr.limit);
	err |= __vmx_vmwrite(GUEST_ES_AR_BYTES, __accessright(es));
	err |= __vmx_vmwrite(GUEST_CS_AR_BYTES, __accessright(cs));
	err |= __vmx_vmwrite(GUEST_SS_AR_BYTES, __accessright(ss));
	err |= __vmx_vmwrite(GUEST_DS_AR_BYTES, __accessright(ds));
	err |= __vmx_vmwrite(GUEST_FS_AR_BYTES, __accessright(fs));
	err |= __vmx_vmwrite(GUEST_GS_AR_BYTES, __accessright(gs));
	err |= __vmx_vmwrite(GUEST_LDTR_AR_BYTES, __accessright(ldt));
	err |= __vmx_vmwrite(GUEST_TR_AR_BYTES, __accessright(tr));
	err |= __vmx_vmwrite(GUEST_INTERRUPTIBILITY_INFO, 0);
	err |= __vmx_vmwrite(GUEST_ACTIVITY_STATE, 0);
	err |= __vmx_vmwrite(GUEST_IA32_DEBUGCTL, __readmsr(MSR_IA32_DEBUGCTLMSR));
	err |= __vmx_vmwrite(GUEST_SYSENTER_CS, __readmsr(MSR_IA32_SYSENTER_CS));
	err |= __vmx_vmwrite(GUEST_CR0, cr0);
	err |= __vmx_vmwrite(GUEST_CR3, ksm.origin_cr3);
	err |= __vmx_vmwrite(GUEST_CR4, cr4);
	err |= __vmx_vmwrite(GUEST_ES_BASE, 0);
	err |= __vmx_vmwrite(GUEST_CS_BASE, 0);
	err |= __vmx_vmwrite(GUEST_SS_BASE, 0);
	err |= __vmx_vmwrite(GUEST_DS_BASE, 0);
	err |= __vmx_vmwrite(GUEST_FS_BASE, __readmsr(MSR_IA32_FS_BASE));
	err |= __vmx_vmwrite(GUEST_GS_BASE, __readmsr(MSR_IA32_GS_BASE));
	err |= __vmx_vmwrite(GUEST_LDTR_BASE, __segmentbase(gdtr.base, ldt));
	err |= __vmx_vmwrite(GUEST_TR_BASE, __segmentbase(gdtr.base, tr));
	err |= __vmx_vmwrite(GUEST_GDTR_BASE, gdtr.base);
	err |= __vmx_vmwrite(GUEST_IDTR_BASE, vcpu->idt.base);
	err |= __vmx_vmwrite(GUEST_DR7, __readdr(7));
	err |= __vmx_vmwrite(GUEST_RSP, sp);
	err |= __vmx_vmwrite(GUEST_RIP, ip);
	err |= __vmx_vmwrite(GUEST_RFLAGS, __readeflags());
	err |= __vmx_vmwrite(GUEST_SYSENTER_ESP, __readmsr(MSR_IA32_SYSENTER_ESP));
	err |= __vmx_vmwrite(GUEST_SYSENTER_EIP, __readmsr(MSR_IA32_SYSENTER_EIP));

	/* Host  */
	err |= __vmx_vmwrite(HOST_ES_SELECTOR, es & 0xf8);
	err |= __vmx_vmwrite(HOST_CS_SELECTOR, cs & 0xf8);
	err |= __vmx_vmwrite(HOST_SS_SELECTOR, ss & 0xf8);
	err |= __vmx_vmwrite(HOST_DS_SELECTOR, ds & 0xf8);
	err |= __vmx_vmwrite(HOST_FS_SELECTOR, fs & 0xf8);
	err |= __vmx_vmwrite(HOST_GS_SELECTOR, gs & 0xf8);
	err |= __vmx_vmwrite(HOST_TR_SELECTOR, tr & 0xf8);
	err |= __vmx_vmwrite(HOST_CR0, cr0);
	err |= __vmx_vmwrite(HOST_CR3, ksm.kernel_cr3);
	err |= __vmx_vmwrite(HOST_CR4, cr4);
	err |= __vmx_vmwrite(HOST_FS_BASE, __readmsr(MSR_IA32_FS_BASE));
	err |= __vmx_vmwrite(HOST_GS_BASE, __readmsr(MSR_IA32_GS_BASE));
	err |= __vmx_vmwrite(HOST_TR_BASE, __segmentbase(gdtr.base, tr));
	err |= __vmx_vmwrite(HOST_GDTR_BASE, gdtr.base);
	err |= __vmx_vmwrite(HOST_IDTR_BASE, idtr.base);
	err |= __vmx_vmwrite(HOST_IA32_SYSENTER_CS, __readmsr(MSR_IA32_SYSENTER_CS));
	err |= __vmx_vmwrite(HOST_IA32_SYSENTER_ESP, __readmsr(MSR_IA32_SYSENTER_ESP));
	err |= __vmx_vmwrite(HOST_IA32_SYSENTER_EIP, __readmsr(MSR_IA32_SYSENTER_EIP));
	err |= __vmx_vmwrite(HOST_RSP, stack_base);
	err |= __vmx_vmwrite(HOST_RIP, (uintptr_t)__vmx_entrypoint);

	return err == 0;
}
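adjust_ctl_val is not reproduced in this excerpt; a sketch of the usual implementation (an assumption: the same allowed-0/allowed-1 folding as the adjusters in Examples #6 and #7, with the value updated in place):

/* Sketch (assumption): apply a capability MSR's must-be-one (low 32 bits)
 * and may-be-one (high 32 bits) masks to the requested control value.  */
static inline void adjust_ctl_val(u32 msr, u64 *val)
{
	u64 v = __readmsr(msr);
	*val &= (u32)(v >> 32);	/* keep only bits allowed to be 1 */
	*val |= (u32)v;		/* force bits that must be 1 */
}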