Example #1
// See: VMM SETUP & TEAR DOWN
_Use_decl_annotations_ static bool VmpEnterVmxMode(
    ProcessorData *processor_data) {
  // Apply FIXED bits
  const Cr0 cr0_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed0)};
  const Cr0 cr0_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr0Fixed1)};
  Cr0 cr0 = {__readcr0()};
  cr0.all &= cr0_fixed1.all;
  cr0.all |= cr0_fixed0.all;
  __writecr0(cr0.all);

  const Cr4 cr4_fixed0 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed0)};
  const Cr4 cr4_fixed1 = {UtilReadMsr(Msr::kIa32VmxCr4Fixed1)};
  Cr4 cr4 = {__readcr4()};
  cr4.all &= cr4_fixed1.all;
  cr4.all |= cr4_fixed0.all;
  __writecr4(cr4.all);

  // Write a VMCS revision identifier
  const Ia32VmxBasicMsr vmx_basic_msr = {UtilReadMsr64(Msr::kIa32VmxBasic)};
  processor_data->vmxon_region->revision_identifier =
      vmx_basic_msr.fields.revision_identifier;

  auto vmxon_region_pa = UtilPaFromVa(processor_data->vmxon_region);
  if (__vmx_on(&vmxon_region_pa)) {
    return false;
  }

  UtilInveptAll();
  return true;
}
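The CR0/CR4 adjustment above follows the VMX fixed-bit rule from the Intel SDM: a bit may be 0 only where the FIXED1 MSR allows it, and must be 1 wherever the FIXED0 MSR requires it. A minimal sketch of that rule as a standalone helper (ApplyVmxFixedBits is an illustrative name, not part of the project):

#include <cstdint>

// Applies the VMX fixed-bit constraints to a control-register value:
// clear anything FIXED1 forbids, then set everything FIXED0 requires.
static std::uint64_t ApplyVmxFixedBits(std::uint64_t value,
                                       std::uint64_t fixed0,
                                       std::uint64_t fixed1) {
  value &= fixed1;  // bits that are allowed to be 1
  value |= fixed0;  // bits that must be 1
  return value;
}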
Example #2
// Deal with an EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va =
      exit_qualification.fields.valid_guest_linear_address
          ? reinterpret_cast<void *>(UtilVmRead(VmcsField::kGuestLinearAddress))
          : nullptr;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptAll();
      return;
    }
  }
  HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                               fault_pa);
}
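The handler above keys off several bits of the exit qualification. A hedged sketch of that layout, following the EPT-violation exit-qualification encoding in the Intel SDM (Table 27-7); field names mirror those used in the handler, but the project's exact struct may differ:

#include <cstdint>

// Sketch of the EPT-violation exit qualification as read by the handler.
union EptViolationQualificationSketch {
  std::uint64_t all;
  struct {
    std::uint64_t read_access : 1;                 // [0] data read caused it
    std::uint64_t write_access : 1;                // [1] data write caused it
    std::uint64_t execute_access : 1;              // [2] instruction fetch
    std::uint64_t ept_readable : 1;                // [3] page was readable
    std::uint64_t ept_writeable : 1;               // [4] page was writeable
    std::uint64_t ept_executable : 1;              // [5] page was executable
    std::uint64_t reserved1 : 1;                   // [6]
    std::uint64_t valid_guest_linear_address : 1;  // [7]
    std::uint64_t caused_by_translation : 1;       // [8]
    std::uint64_t reserved2 : 55;                  // [9:63]
  } fields;
};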
Example #3
// VMCALL
_Use_decl_annotations_ static void VmmpHandleVmCall(
    GuestContext *guest_context) {
  // VMCALL for Sushi expects that cx holds a command number and dx optionally
  // holds the address of a context parameter
  const auto hypercall_number = guest_context->gp_regs->cx;
  const auto context = reinterpret_cast<void *>(guest_context->gp_regs->dx);

  if (hypercall_number == kHyperPlatformVmmBackdoorCode) {
    // Unloading requested
    HYPERPLATFORM_COMMON_DBG_BREAK();

    // The processor sets the limits of the IDT and GDT to ffff when a VM-exit
    // occurs. Those are not the correct values, but normally it is fine to
    // ignore that since VMRESUME loads the correct values from the VMCS. Here,
    // however, we are going to skip VMRESUME and simply return to where VMCALL
    // was executed. That would keep the broken values and end up with bug
    // check 109, so we have to fix them manually.
    const auto gdt_limit = UtilVmRead(VmcsField::kGuestGdtrLimit);
    const auto gdt_base = UtilVmRead(VmcsField::kGuestGdtrBase);
    const auto idt_limit = UtilVmRead(VmcsField::kGuestIdtrLimit);
    const auto idt_base = UtilVmRead(VmcsField::kGuestIdtrBase);
    Gdtr gdtr = {static_cast<USHORT>(gdt_limit), gdt_base};
    Idtr idtr = {static_cast<USHORT>(idt_limit), idt_base};
    __lgdt(&gdtr);
    __lidt(&idtr);

    // Store the address of the management structure in the context parameter
    const auto result_ptr = reinterpret_cast<ProcessorData **>(context);
    *result_ptr = guest_context->stack->processor_data;
    HYPERPLATFORM_LOG_DEBUG_SAFE("Context at %p %p", context,
                                 guest_context->stack->processor_data);

    // Set rip to the instruction following VMCALL
    const auto exit_instruction_length =
        UtilVmRead(VmcsField::kVmExitInstructionLen);
    const auto return_address = guest_context->ip + exit_instruction_length;

    // Since rflags is overwritten after VMXOFF, we have to manually indicate
    // that VMCALL was successful by clearing CF and ZF.
    guest_context->flag_reg.fields.cf = false;
    guest_context->flag_reg.fields.zf = false;

    // Set the registers used after VMXOFF to recover the context. Volatile
    // registers must be used because changes to them are reflected in the
    // guest's context after VMXOFF.
    guest_context->gp_regs->cx = return_address;
    guest_context->gp_regs->dx = guest_context->gp_regs->sp;
    guest_context->gp_regs->ax = guest_context->flag_reg.all;
    guest_context->vm_continue = false;

    UtilInveptAll();

  } else {
    // Unsupported hypercall. Handle like other VMX instructions
    VmmpHandleVmx(guest_context);
  }
}
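For context, a hedged sketch of what the guest-side caller of this backdoor might look like, assuming an assembly thunk AsmVmxCall that loads its two arguments into cx and dx and executes VMCALL (both the thunk and its signature are assumptions, not part of the code shown here):

// Hypothetical guest-side counterpart of the backdoor above: pass the
// command number in cx and a result slot through dx.
extern "C" void AsmVmxCall(ULONG_PTR hypercall_number, void *context);

static ProcessorData *SketchRequestVmmStop() {
  ProcessorData *processor_data = nullptr;
  // On success the handler clears CF/ZF, writes the management structure's
  // address through the dx-passed pointer, and resumes execution right
  // after the VMCALL with VMX turned off.
  AsmVmxCall(kHyperPlatformVmmBackdoorCode, &processor_data);
  return processor_data;
}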
Example #4
// Stop showing a shadow page
_Use_decl_annotations_ static void SbppDisablePageShadowing(
    const PatchInformation& info, EptData* ept_data) {
  // Restore the original page with full access rights
  const auto pa_base = UtilPaFromVa(PAGE_ALIGN(info.patch_address));
  const auto ept_pt_entry = EptGetEptPtEntry(ept_data, pa_base);
  ept_pt_entry->fields.execute_access = true;
  ept_pt_entry->fields.write_access = true;
  ept_pt_entry->fields.read_access = true;
  ept_pt_entry->fields.physial_address = UtilPfnFromPa(pa_base);
  UtilInveptAll();
}
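A minimal sketch of the EPT page-table entry manipulated above, following the Intel SDM EPT PTE format; physial_address (sic) is spelled to match the identifier the code uses, and holds a page frame number:

#include <cstdint>

// Sketch of an EPT PTE as updated by the shadowing routines.
union EptCommonEntrySketch {
  std::uint64_t all;
  struct {
    std::uint64_t read_access : 1;       // [0]
    std::uint64_t write_access : 1;      // [1]
    std::uint64_t execute_access : 1;    // [2]
    std::uint64_t memory_type : 3;       // [3:5] EPT memory type
    std::uint64_t reserved1 : 6;         // [6:11]
    std::uint64_t physial_address : 36;  // [12:47] page frame number
    std::uint64_t reserved2 : 16;        // [48:63]
  } fields;
};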
Example #5
// Show a shadowed page for read and write
_Use_decl_annotations_ static void SbppEnablePageShadowingForRW(
    const PatchInformation& info, EptData* ept_data) {
  const auto ept_pt_entry =
      EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address));

  // Allow the guest to read and write, as well as execute, the address. Show
  // the copied page for read/write access; it does not have a breakpoint but
  // reflects any modifications made by the guest.
  ept_pt_entry->fields.write_access = true;
  ept_pt_entry->fields.read_access = true;
  ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_rw);

  UtilInveptAll();
}
Example #6
// Show a shadowed page for execution
_Use_decl_annotations_ static void SbppEnablePageShadowingForExec(
    const PatchInformation& info, EptData* ept_data) {
  const auto ept_pt_entry =
      EptGetEptPtEntry(ept_data, UtilPaFromVa(info.patch_address));

  // Allow the VMM to redirect read and write access to the address by denying
  // those accesses and handling them on EPT violation
  ept_pt_entry->fields.write_access = false;
  ept_pt_entry->fields.read_access = false;

  // Only execution of the address is now allowed. Show the guest the copied
  // page for execution, which contains the actual breakpoint.
  ept_pt_entry->fields.physial_address = UtilPfnFromPa(info.pa_base_for_exec);

  UtilInveptAll();
}
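Taken together, the two routines above implement a flip-flop driven by EPT violations. A minimal sketch of the dispatch logic (SbppSwitchShadowView is an illustrative name; the project's actual violation handler may be organized differently):

// Hypothetical dispatcher: on an EPT violation against a shadowed page,
// show the exec view (with the breakpoint) for instruction fetches and
// the clean RW view for data accesses.
static void SbppSwitchShadowView(const EptViolationQualification &qual,
                                 const PatchInformation &info,
                                 EptData *ept_data) {
  if (qual.fields.execute_access) {
    SbppEnablePageShadowingForExec(info, ept_data);
  } else {
    SbppEnablePageShadowingForRW(info, ept_data);
  }
}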
Example #7
// Deal with an EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va =
      exit_qualification.fields.valid_guest_linear_address
          ? reinterpret_cast<void *>(UtilVmRead(VmcsField::kGuestLinearAddress))
          : nullptr;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    // EPT entry miss. It should be device memory.
    HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
    // HYPERPLATFORM_LOG_DEBUG_SAFE(
    //    "[INIT] Dev VA = %p, PA = %016llx, Used = %d",
    //    0, fault_pa, ept_data->preallocated_entries_count);

    if (!IsReleaseBuild()) {
      const auto is_device_memory = EptpIsDeviceMemory(fault_pa);
      NT_ASSERT(is_device_memory);
      UNREFERENCED_PARAMETER(is_device_memory);
    }

    // There is a race condition here. If multiple processors reach this code
    // with the same fault_pa, this function may create multiple EPT entries
    // for one physical address and leak memory. This call should probably be
    // guarded by a spin-lock, but it is not yet because the impact is so
    // small; a sketch of such a guard follows this example.
    EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

    UtilInveptAll();
  } else if (exit_qualification.fields.caused_by_translation &&
             exit_qualification.fields.execute_access &&
             !exit_qualification.fields.ept_executable) {
    const auto ept_pt_entry = EptGetEptPtEntry(ept_data->ept_pml4, 4, fault_pa);

    MmoneptHandleDodgyRegionExecution(ept_data->hs_ept_data, ept_pt_entry,
                                      fault_pa, fault_va);
  } else {
    HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                                 fault_pa);
  }
}
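The race noted above could be closed with a small guard. A hedged sketch, assuming a raw interlocked flag is acceptable in VMX root mode where ordinary kernel synchronization APIs are best avoided (g_eptp_construction_lock and EptpConstructTablesGuarded are illustrative names, not part of the project):

// Spin on an interlocked flag, then re-check the entry before building.
static volatile LONG g_eptp_construction_lock = 0;

static void EptpConstructTablesGuarded(EptCommonEntry *pml4, ULONG64 fault_pa,
                                       EptData *ept_data) {
  while (_InterlockedCompareExchange(&g_eptp_construction_lock, 1, 0) != 0) {
    _mm_pause();  // busy-wait until the flag is released
  }
  // Re-check under the lock: another processor may have already built the
  // tables for this fault_pa while we were spinning.
  const auto entry = EptGetEptPtEntry(pml4, 4, fault_pa);
  if (!entry || !entry->all) {
    EptpConstructTables(pml4, 4, fault_pa, ept_data);
  }
  _InterlockedExchange(&g_eptp_construction_lock, 0);  // release the flag
}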