Example #1
// Deal with EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va = exit_qualification.fields.valid_guest_linear_address
                            ? UtilVmRead(VmcsField::kGuestLinearAddress)
                            : 0;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptAll();
      return;
    }
  }
  HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                               fault_pa);
}
Example #2
// INVLPG
_Use_decl_annotations_ static void VmmpHandleInvalidateTLBEntry(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const auto invalidate_address =
      static_cast<ULONG_PTR>(UtilVmRead(VmcsField::kExitQualification));
  AsmInvlpg(invalidate_address);
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
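Every handler in these examples finishes by calling VmmpAdjustGuestInstructionPointer so that the guest resumes after the instruction that caused the VM-exit. Below is a minimal sketch of such a helper, assuming HyperPlatform's UtilVmRead/UtilVmWrite wrappers and the kVmExitInstructionLen and kGuestRip VMCS field constants; the real helper may differ in details.

// Advances the guest RIP by the VM-exit instruction length reported in the
// VMCS so that the exiting instruction is not re-executed on VM-entry.
static void VmmpAdjustGuestInstructionPointer(ULONG_PTR guest_ip) {
  const auto exit_inst_length = UtilVmRead(VmcsField::kVmExitInstructionLen);
  UtilVmWrite(VmcsField::kGuestRip, guest_ip + exit_inst_length);
}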
Example #3
// XSETBV. It is executed when the system resumes
_Use_decl_annotations_ static void VmmpHandleXsetbv(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  AsmXsetbv(static_cast<ULONG>(guest_context->gp_regs->cx),
            static_cast<ULONG>(guest_context->gp_regs->dx),
            static_cast<ULONG>(guest_context->gp_regs->ax));

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #4
// XSETBV. It is executed when the system resumes
_Use_decl_annotations_ static void VmmpHandleXsetbv(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  ULARGE_INTEGER value = {};
  value.LowPart = static_cast<ULONG>(guest_context->gp_regs->ax);
  value.HighPart = static_cast<ULONG>(guest_context->gp_regs->dx);
  _xsetbv(static_cast<ULONG>(guest_context->gp_regs->cx), value.QuadPart);

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #5
// RDTSC
_Use_decl_annotations_ static void VmmpHandleRdtsc(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  ULARGE_INTEGER tsc = {};
  tsc.QuadPart = __rdtsc();
  guest_context->gp_regs->dx = tsc.HighPart;
  guest_context->gp_regs->ax = tsc.LowPart;

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #6
// Deal with L2 EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolationEx(EptData *ept_data,
                                                    EptData *ept_data02,
                                                    ULONG_PTR guest_pa,
                                                    bool is_range_of_ept12) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  ULONG_PTR fault_pa = 0;
  if (!guest_pa) {
    fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  } else {
    fault_pa = guest_pa;
  }

  const auto fault_va = reinterpret_cast<void *>(
      exit_qualification.fields.valid_guest_linear_address
          ? UtilVmRead(VmcsField::kGuestLinearAddress)
          : 0);

  // GuestPhysicalAddress is the guest physical address of an EPT1-2 entry;
  // its write access was disabled when L2 was first initialized.
  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptGlobal();
      return;
    }
  }

  if (!exit_qualification.fields.ept_writeable && is_range_of_ept12) {
    EptCommonEntry *Ept01Pte = EptGetEptPtEntry(
        ept_data, UtilVmRead64(VmcsField::kGuestPhysicalAddress));
    if (Ept01Pte) {
      EptCommonEntry *entry = reinterpret_cast<EptCommonEntry *>(
          UtilVaFromPa(UtilVmRead64(VmcsField::kGuestPhysicalAddress)));
      Ept01Pte->fields.write_access = true;
      HYPERPLATFORM_LOG_DEBUG_SAFE(
          "Faced a non-writable address, but it is readable: %p %p",
          UtilVmRead64(VmcsField::kGuestPhysicalAddress),
          entry->fields.physial_address);
      UtilInveptGlobal();
    }
  }
}
Example #7
// VMX instructions except for VMCALL
_Use_decl_annotations_ static void VmmpHandleVmx(GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  guest_context->flag_reg.fields.cf = true;  // Error without status
  guest_context->flag_reg.fields.pf = false;
  guest_context->flag_reg.fields.af = false;
  guest_context->flag_reg.fields.zf = false;  // Error without status
  guest_context->flag_reg.fields.sf = false;
  guest_context->flag_reg.fields.of = false;
  UtilVmWrite(VmcsField::kGuestRflags, guest_context->flag_reg.all);
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #8
// MOV to / from DRx
_Use_decl_annotations_ static void VmmpHandleDrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const MovDrQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};
  const auto register_used =
      VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);

  // Emulate the instruction
  switch (static_cast<MovDrDirection>(exit_qualification.fields.direction)) {
    case MovDrDirection::kMoveToDr:
      // clang-format off
      switch (exit_qualification.fields.debugl_register) {
        case 0: __writedr(0, *register_used); break;
        case 1: __writedr(1, *register_used); break;
        case 2: __writedr(2, *register_used); break;
        case 3: __writedr(3, *register_used); break;
        case 4: __writedr(4, *register_used); break;
        case 5: __writedr(5, *register_used); break;
        case 6: __writedr(6, *register_used); break;
        case 7: __writedr(7, *register_used); break;
        default: break;
      }
      // clang-format on
      break;
    case MovDrDirection::kMoveFromDr:
      // clang-format off
      switch (exit_qualification.fields.debugl_register) {
        case 0: *register_used = __readdr(0); break;
        case 1: *register_used = __readdr(1); break;
        case 2: *register_used = __readdr(2); break;
        case 3: *register_used = __readdr(3); break;
        case 4: *register_used = __readdr(4); break;
        case 5: *register_used = __readdr(5); break;
        case 6: *register_used = __readdr(6); break;
        case 7: *register_used = __readdr(7); break;
        default: break;
      }
      // clang-format on
      break;
    default:
      HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0, 0,
                                     0);
      break;
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #9
// Deal with EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va =
      exit_qualification.fields.valid_guest_linear_address
          ? reinterpret_cast<void *>(UtilVmRead(VmcsField::kGuestLinearAddress))
          : nullptr;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    // EPT entry miss. It should be device memory.
    HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
    // HYPERPLATFORM_LOG_DEBUG_SAFE(
    //    "[INIT] Dev VA = %p, PA = %016llx, Used = %d",
    //    0, fault_pa, ept_data->preallocated_entries_count);

    if (!IsReleaseBuild()) {
      const auto is_device_memory = EptpIsDeviceMemory(fault_pa);
      NT_ASSERT(is_device_memory);
      UNREFERENCED_PARAMETER(is_device_memory);
    }

    // There is a race condition here. If multiple processors reach this code
    // with the same fault_pa, this function may create multiple EPT entries
    // for one physical address, which leads to a memory leak. This call should
    // probably be guarded by a spin lock (a possible guard is sketched after
    // this example) but is not yet because the impact is so small.
    EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

    UtilInveptAll();
  } else if (exit_qualification.fields.caused_by_translation &&
             exit_qualification.fields.execute_access &&
             !exit_qualification.fields.ept_executable) {
    const auto ept_pt_entry = EptGetEptPtEntry(ept_data->ept_pml4, 4, fault_pa);

    MmoneptHandleDodgyRegionExecution(ept_data->hs_ept_data, ept_pt_entry,
                                      fault_pa, fault_va);
  } else {
    HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                                 fault_pa);
  }
}
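The race condition noted above can be closed with a small guard around table construction. The following is only a sketch and not HyperPlatform code: it assumes the usual kernel and intrinsic headers, a hypothetical file-scope lock variable, and a bare interlocked spin (the handler runs in VMX root mode, where blocking synchronization is not an option); it reuses EptGetEptPtEntry and EptpConstructTables with the signatures shown in this example.

static volatile LONG g_eptp_construction_lock = 0;  // hypothetical guard

// Serializes EPT table construction so that two processors faulting on the
// same physical address do not both allocate entries for it.
static void EptpConstructTablesLocked(EptData *ept_data, ULONG64 fault_pa) {
  while (_InterlockedCompareExchange(&g_eptp_construction_lock, 1, 0) != 0) {
    _mm_pause();  // busy-wait; the critical section is short
  }
  // Re-check under the lock; only the first processor builds the entry.
  const auto entry = EptGetEptPtEntry(ept_data->ept_pml4, 4, fault_pa);
  if (!entry || !entry->all) {
    EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);
  }
  _InterlockedExchange(&g_eptp_construction_lock, 0);
}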
Example #10
// CPUID
_Use_decl_annotations_ static void VmmpHandleCpuid(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  unsigned int cpu_info[4] = {};
  const auto function_id = static_cast<int>(guest_context->gp_regs->ax);
  const auto sub_function_id = static_cast<int>(guest_context->gp_regs->cx);

  if (function_id == 0 && sub_function_id == kHyperPlatformVmmBackdoorCode) {
    // Say "Pong by VMM!" when the back-door code was given
    guest_context->gp_regs->bx = 'gnoP';
    guest_context->gp_regs->dx = ' yb ';
    guest_context->gp_regs->cx = '!MMV';
  } else {
    __cpuidex(reinterpret_cast<int *>(cpu_info), function_id, sub_function_id);
    guest_context->gp_regs->ax = cpu_info[0];
    guest_context->gp_regs->bx = cpu_info[1];
    guest_context->gp_regs->cx = cpu_info[2];
    guest_context->gp_regs->dx = cpu_info[3];
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
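The back-door can be probed from inside the guest with a plain CPUID. The snippet below is hypothetical guest-side test code, not part of HyperPlatform: the helper name is made up, and the value passed as the sub-leaf must match kHyperPlatformVmmBackdoorCode in the VMM build; only __cpuidex and the register layout mirror the handler above.

#include <cstring>
#include <intrin.h>

// Returns true if the VMM answered "Pong by VMM!" in EBX, EDX, and ECX.
static bool IsHyperPlatformPresent(int backdoor_code) {
  int regs[4] = {};                   // EAX, EBX, ECX, EDX
  __cpuidex(regs, 0, backdoor_code);  // leaf 0 with the back-door sub-leaf
  char message[13] = {};
  std::memcpy(message + 0, &regs[1], 4);  // EBX ('gnoP') -> "Pong"
  std::memcpy(message + 4, &regs[3], 4);  // EDX (' yb ') -> " by "
  std::memcpy(message + 8, &regs[2], 4);  // ECX ('!MMV') -> "VMM!"
  return std::strcmp(message, "Pong by VMM!") == 0;
}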
Example #11
// EXIT_REASON_EPT_VIOLATION
_Use_decl_annotations_ static void VmmpHandleEptViolation(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  EptHandleEptViolation(
      guest_context->stack->processor_data->shared_data->ept_data);
}
Example #12
// INVD
_Use_decl_annotations_ static void VmmpHandleInvalidateInternalCaches(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  AsmInvalidateInternalCaches();
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #13
// MOV to / from CRx
_Use_decl_annotations_ static void VmmpHandleCrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const MovCrQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto register_used =
      VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);

  switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
    case MovCrAccessType::kMoveToCr: {
      switch (exit_qualification.fields.control_register) {
        // CR0 <- Reg
        case 0:
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr0, *register_used);
          UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used);
          break;

        // CR3 <- Reg
        case 3:
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(*register_used);
          }
          UtilVmWrite(VmcsField::kGuestCr3, *register_used);
          break;

        // CR4 <- Reg
        case 4:
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr4, *register_used);
          UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used);
          break;

        // CR8 <- Reg
        case 8:
          guest_context->cr8 = *register_used;
          break;

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0,
                                         0);
          break;
      }
    } break;

    // Note that MOV from CRx should never cause a VM-exit with the current
    // settings. This is here just in case you enable it.
    case MovCrAccessType::kMoveFromCr: {
      switch (exit_qualification.fields.control_register) {
        // Reg <- CR3
        case 3:
          *register_used = UtilVmRead(VmcsField::kGuestCr3);
          break;

        // Reg <- CR8
        case 8:
          *register_used = guest_context->cr8;
          break;

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0,
                                         0);
          break;
      }
    } break;

    // Unimplemented
    case MovCrAccessType::kClts:
    case MovCrAccessType::kLmsw:
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      break;
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #14
// LLDT, LTR, SLDT, and STR
_Use_decl_annotations_ static void VmmpHandleLdtrOrTrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const LdtrOrTrAccessQualification exit_qualification = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};

  // Calculate an address or a register to be used for the instruction
  const auto displacement = UtilVmRead(VmcsField::kExitQualification);

  ULONG_PTR operation_address = 0;
  if (exit_qualification.fields.register_access) {
    // Register
    const auto register_used =
        VmmpSelectRegister(exit_qualification.fields.register1, guest_context);
    operation_address = reinterpret_cast<ULONG_PTR>(register_used);
  } else {
    // Base
    ULONG_PTR base_value = 0;
    if (!exit_qualification.fields.base_register_invalid) {
      const auto register_used = VmmpSelectRegister(
          exit_qualification.fields.base_register, guest_context);
      base_value = *register_used;
    }

    // Index
    ULONG_PTR index_value = 0;
    if (!exit_qualification.fields.index_register_invalid) {
      const auto register_used = VmmpSelectRegister(
          exit_qualification.fields.index_register, guest_context);
      index_value = *register_used;
      switch (
          static_cast<GdtrOrIdtrScaling>(exit_qualification.fields.scalling)) {
        case GdtrOrIdtrScaling::kNoScaling:
          index_value = index_value;
          break;
        case GdtrOrIdtrScaling::kScaleBy2:
          index_value = index_value * 2;
          break;
        case GdtrOrIdtrScaling::kScaleBy4:
          index_value = index_value * 4;
          break;
        case GdtrOrIdtrScaling::kScaleBy8:
          index_value = index_value * 8;
          break;
        default:
          break;
      }
    }

    operation_address = base_value + index_value + displacement;
    if (static_cast<GdtrOrIdtrAaddressSize>(
            exit_qualification.fields.address_size) ==
        GdtrOrIdtrAaddressSize::k32bit) {
      operation_address &= MAXULONG;
    }
  }

  // Update CR3 with that of the guest since the code below is going to access
  // guest memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // Emulate the instruction
  auto selector = reinterpret_cast<USHORT *>(operation_address);
  switch (static_cast<LdtrOrTrInstructionIdentity>(
      exit_qualification.fields.instruction_identity)) {
    case LdtrOrTrInstructionIdentity::kSldt:
      *selector =
          static_cast<USHORT>(UtilVmRead(VmcsField::kGuestLdtrSelector));
      break;
    case LdtrOrTrInstructionIdentity::kStr:
      *selector = static_cast<USHORT>(UtilVmRead(VmcsField::kGuestTrSelector));
      break;
    case LdtrOrTrInstructionIdentity::kLldt:
      UtilVmWrite(VmcsField::kGuestLdtrSelector, *selector);
      break;
    case LdtrOrTrInstructionIdentity::kLtr:
      UtilVmWrite(VmcsField::kGuestTrSelector, *selector);
      break;
  }

  __writecr3(vmm_cr3);
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #15
// LIDT, SIDT, LGDT and SGDT
_Use_decl_annotations_ static void VmmpHandleGdtrOrIdtrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const GdtrOrIdtrAccessQualification exit_qualification = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmxInstructionInfo))};

  // Calculate an address to be used for the instruction
  const auto displacement = UtilVmRead(VmcsField::kExitQualification);

  // Base
  ULONG_PTR base_value = 0;
  if (!exit_qualification.fields.base_register_invalid) {
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.base_register, guest_context);
    base_value = *register_used;
  }

  // Index
  ULONG_PTR index_value = 0;
  if (!exit_qualification.fields.index_register_invalid) {
    const auto register_used = VmmpSelectRegister(
        exit_qualification.fields.index_register, guest_context);
    index_value = *register_used;
    switch (
        static_cast<GdtrOrIdtrScaling>(exit_qualification.fields.scalling)) {
      case GdtrOrIdtrScaling::kNoScaling:
        index_value = index_value;
        break;
      case GdtrOrIdtrScaling::kScaleBy2:
        index_value = index_value * 2;
        break;
      case GdtrOrIdtrScaling::kScaleBy4:
        index_value = index_value * 4;
        break;
      case GdtrOrIdtrScaling::kScaleBy8:
        index_value = index_value * 8;
        break;
      default:
        break;
    }
  }

  auto operation_address = base_value + index_value + displacement;
  if (static_cast<GdtrOrIdtrAaddressSize>(
          exit_qualification.fields.address_size) ==
      GdtrOrIdtrAaddressSize::k32bit) {
    operation_address &= MAXULONG;
  }

  // Update CR3 with that of the guest since the code below is going to access
  // guest memory.
  const auto guest_cr3 = UtilVmRead(VmcsField::kGuestCr3);
  const auto vmm_cr3 = __readcr3();
  __writecr3(guest_cr3);

  // Emulate the instruction
  auto descriptor_table_reg = reinterpret_cast<Idtr *>(operation_address);
  switch (static_cast<GdtrOrIdtrInstructionIdentity>(
      exit_qualification.fields.instruction_identity)) {
    case GdtrOrIdtrInstructionIdentity::kSgdt:
      descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestGdtrBase);
      descriptor_table_reg->limit =
          static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestGdtrLimit));
      break;
    case GdtrOrIdtrInstructionIdentity::kSidt:
      descriptor_table_reg->base = UtilVmRead(VmcsField::kGuestIdtrBase);
      descriptor_table_reg->limit =
          static_cast<unsigned short>(UtilVmRead(VmcsField::kGuestIdtrLimit));
      break;
    case GdtrOrIdtrInstructionIdentity::kLgdt:
      UtilVmWrite(VmcsField::kGuestGdtrBase, descriptor_table_reg->base);
      UtilVmWrite(VmcsField::kGuestGdtrLimit, descriptor_table_reg->limit);
      break;
    case GdtrOrIdtrInstructionIdentity::kLidt:
      UtilVmWrite(VmcsField::kGuestIdtrBase, descriptor_table_reg->base);
      UtilVmWrite(VmcsField::kGuestIdtrLimit, descriptor_table_reg->limit);
      break;
  }

  __writecr3(vmm_cr3);
  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #16
// MOV to / from CRx
_Use_decl_annotations_ static void VmmpHandleCrAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const MovCrQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto register_used =
      VmmpSelectRegister(exit_qualification.fields.gp_register, guest_context);

  switch (static_cast<MovCrAccessType>(exit_qualification.fields.access_type)) {
    case MovCrAccessType::kMoveToCr:
      switch (exit_qualification.fields.control_register) {
        // CR0 <- Reg
        case 0: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr0, *register_used);
          UtilVmWrite(VmcsField::kCr0ReadShadow, *register_used);
          break;
        }

        // CR3 <- Reg
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(*register_used);
          }
          UtilVmWrite(VmcsField::kGuestCr3, *register_used);
          break;
        }

        // CR4 <- Reg
        case 4: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          if (UtilIsX86Pae()) {
            UtilLoadPdptes(UtilVmRead(VmcsField::kGuestCr3));
          }
          UtilVmWrite(VmcsField::kGuestCr4, *register_used);
          UtilVmWrite(VmcsField::kCr4ReadShadow, *register_used);
          break;
        }

        // CR8 <- Reg
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          guest_context->cr8 = *register_used;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0,
                                         0, 0);
          break;
      }
      break;

    case MovCrAccessType::kMoveFromCr:
      switch (exit_qualification.fields.control_register) {
        // Reg <- CR3
        case 3: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = UtilVmRead(VmcsField::kGuestCr3);
          break;
        }

        // Reg <- CR8
        case 8: {
          HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
          *register_used = guest_context->cr8;
          break;
        }

        default:
          HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnspecified, 0,
                                         0, 0);
          break;
      }
      break;

    // Unimplemented
    case MovCrAccessType::kClts:
    case MovCrAccessType::kLmsw:
    default:
      HYPERPLATFORM_COMMON_DBG_BREAK();
      break;
  }

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
Example #17
// Dispatches VM-exit to a corresponding handler
_Use_decl_annotations_ static void VmmpHandleVmExit(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

  const VmExitInformation exit_reason = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitReason))};

  if (kVmmpEnableRecordVmExit) {
    // Save them for ease of troubleshooting
    const auto processor = KeGetCurrentProcessorNumber();
    auto &index = g_vmmp_next_history_index[processor];
    auto &history = g_vmmp_vm_exit_history[processor][index];

    history.gp_regs = *guest_context->gp_regs;
    history.ip = guest_context->ip;
    history.exit_reason = exit_reason;
    history.exit_qualification = UtilVmRead(VmcsField::kExitQualification);
    history.instruction_info = UtilVmRead(VmcsField::kVmxInstructionInfo);
    if (++index == kVmmpNumberOfRecords) {
      index = 0;
    }
  }

  switch (exit_reason.fields.reason) {
    case VmxExitReason::kExceptionOrNmi:
      VmmpHandleException(guest_context);
      break;
    case VmxExitReason::kTripleFault:
      VmmpHandleTripleFault(guest_context);
      break;
    case VmxExitReason::kCpuid:
      VmmpHandleCpuid(guest_context);
      break;
    case VmxExitReason::kInvd:
      VmmpHandleInvalidateInternalCaches(guest_context);
      break;
    case VmxExitReason::kInvlpg:
      VmmpHandleInvalidateTLBEntry(guest_context);
      break;
    case VmxExitReason::kRdtsc:
      VmmpHandleRdtsc(guest_context);
      break;
    case VmxExitReason::kCrAccess:
      VmmpHandleCrAccess(guest_context);
      break;
    case VmxExitReason::kDrAccess:
      VmmpHandleDrAccess(guest_context);
      break;
    case VmxExitReason::kMsrRead:
      VmmpHandleMsrReadAccess(guest_context);
      break;
    case VmxExitReason::kMsrWrite:
      VmmpHandleMsrWriteAccess(guest_context);
      break;
    case VmxExitReason::kGdtrOrIdtrAccess:
      VmmpHandleGdtrOrIdtrAccess(guest_context);
      break;
    case VmxExitReason::kLdtrOrTrAccess:
      VmmpHandleLdtrOrTrAccess(guest_context);
      break;
    case VmxExitReason::kEptViolation:
      VmmpHandleEptViolation(guest_context);
      break;
    case VmxExitReason::kEptMisconfig:
      VmmpHandleEptMisconfig(guest_context);
      break;
    case VmxExitReason::kVmcall:
      VmmpHandleVmCall(guest_context);
      break;
    case VmxExitReason::kVmclear:
    case VmxExitReason::kVmlaunch:
    case VmxExitReason::kVmptrld:
    case VmxExitReason::kVmptrst:
    case VmxExitReason::kVmread:
    case VmxExitReason::kVmresume:
    case VmxExitReason::kVmwrite:
    case VmxExitReason::kVmoff:
    case VmxExitReason::kVmon:
      VmmpHandleVmx(guest_context);
      break;
    case VmxExitReason::kRdtscp:
      VmmpHandleRdtscp(guest_context);
      break;
    case VmxExitReason::kXsetbv:
      VmmpHandleXsetbv(guest_context);
      break;
    default:
      VmmpHandleUnexpectedExit(guest_context);
      break;
  }
}
Example #18
// Interrupt
_Use_decl_annotations_ static void VmmpHandleException(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  const VmExitInterruptionInformationField exception = {
      static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrInfo))};

  if (static_cast<interruption_type>(exception.fields.interruption_type) ==
      interruption_type::kHardwareException) {
    // Hardware exception
    if (static_cast<InterruptionVector>(exception.fields.vector) ==
        InterruptionVector::kPageFaultException) {
      // #PF
      const PageFaultErrorCode fault_code = {
          static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode))};
      const auto fault_address = UtilVmRead(VmcsField::kExitQualification);

      VmEntryInterruptionInformationField inject = {};
      inject.fields.interruption_type = exception.fields.interruption_type;
      inject.fields.vector = exception.fields.vector;
      inject.fields.deliver_error_code = true;
      inject.fields.valid = true;
      AsmWriteCR2(fault_address);
      UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, fault_code.all);
      UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
      HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #PF Fault= %p Code= 0x%2x",
                                  guest_context->ip, fault_address, fault_code);

    } else if (static_cast<InterruptionVector>(exception.fields.vector) ==
               InterruptionVector::kGeneralProtectionException) {
      // #GP
      const auto error_code =
          static_cast<ULONG32>(UtilVmRead(VmcsField::kVmExitIntrErrorCode));

      VmEntryInterruptionInformationField inject = {};
      inject.fields.interruption_type = exception.fields.interruption_type;
      inject.fields.vector = exception.fields.vector;
      inject.fields.deliver_error_code = true;
      inject.fields.valid = true;
      UtilVmWrite(VmcsField::kVmEntryExceptionErrorCode, error_code);
      UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
      HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #GP Code= 0x%2x",
                                  guest_context->ip, error_code);

    } else {
      HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0);
    }

  } else if (static_cast<interruption_type>(
                 exception.fields.interruption_type) ==
             interruption_type::kSoftwareException) {
    // Software exception
    if (static_cast<InterruptionVector>(exception.fields.vector) ==
        InterruptionVector::kBreakpointException) {
      // #BP
      VmEntryInterruptionInformationField inject = {};
      inject.fields.interruption_type = exception.fields.interruption_type;
      inject.fields.vector = exception.fields.vector;
      inject.fields.deliver_error_code = false;
      inject.fields.valid = true;
      UtilVmWrite(VmcsField::kVmEntryIntrInfoField, inject.all);
      UtilVmWrite(VmcsField::kVmEntryInstructionLen, 1);
      HYPERPLATFORM_LOG_INFO_SAFE("GuestIp= %p, #BP ", guest_context->ip);

    } else {
      HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0);
    }
  } else {
    HYPERPLATFORM_COMMON_BUG_CHECK(HyperPlatformBugCheck::kUnknown, 0, 0, 0);
  }
}
Example #19
// WRMSR
_Use_decl_annotations_ static void VmmpHandleMsrWriteAccess(
    GuestContext *guest_context) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  VmmpHandleMsrAccess(guest_context, false);
}
Example #20
// A wrapper of RtlPcToFileHeader
_Use_decl_annotations_ PVOID MmonPcToFileHeader(PVOID pc_value) {
  HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
  void *base = nullptr;
  return g_mmonp_RtlPcToFileHeader(pc_value, &base);
}
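For context, g_mmonp_RtlPcToFileHeader is a function pointer with the prototype of ntoskrnl's exported RtlPcToFileHeader. One way it could be declared and resolved at driver initialization, shown here as an assumption rather than the actual MemoryMon code, is via MmGetSystemRoutineAddress:

// Matches the RtlPcToFileHeader export: returns the base of the image that
// contains PcValue, or nullptr if the address is not inside any loaded image.
using RtlPcToFileHeaderType = PVOID(NTAPI *)(PVOID PcValue, PVOID *BaseOfImage);
static RtlPcToFileHeaderType g_mmonp_RtlPcToFileHeader = nullptr;

// Resolves the export once at PASSIVE_LEVEL during driver initialization.
static NTSTATUS MmonpInitializeRtlPcToFileHeader() {
  UNICODE_STRING name{};
  RtlInitUnicodeString(&name, L"RtlPcToFileHeader");
  g_mmonp_RtlPcToFileHeader = reinterpret_cast<RtlPcToFileHeaderType>(
      MmGetSystemRoutineAddress(&name));
  return g_mmonp_RtlPcToFileHeader ? STATUS_SUCCESS : STATUS_UNSUCCESSFUL;
}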