Example #1
// Dumps guest's selectors
/*_Use_decl_annotations_*/ static void VmmpDumpGuestSelectors() {
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "es %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestEsSelector),
      UtilVmRead(VmcsField::kGuestEsBase), UtilVmRead(VmcsField::kGuestEsLimit),
      UtilVmRead(VmcsField::kGuestEsArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "cs %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestCsSelector),
      UtilVmRead(VmcsField::kGuestCsBase), UtilVmRead(VmcsField::kGuestCsLimit),
      UtilVmRead(VmcsField::kGuestCsArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "ss %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestSsSelector),
      UtilVmRead(VmcsField::kGuestSsBase), UtilVmRead(VmcsField::kGuestSsLimit),
      UtilVmRead(VmcsField::kGuestSsArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "ds %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestDsSelector),
      UtilVmRead(VmcsField::kGuestDsBase), UtilVmRead(VmcsField::kGuestDsLimit),
      UtilVmRead(VmcsField::kGuestDsArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "fs %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestFsSelector),
      UtilVmRead(VmcsField::kGuestFsBase), UtilVmRead(VmcsField::kGuestFsLimit),
      UtilVmRead(VmcsField::kGuestFsArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "gs %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestGsSelector),
      UtilVmRead(VmcsField::kGuestGsBase), UtilVmRead(VmcsField::kGuestGsLimit),
      UtilVmRead(VmcsField::kGuestGsArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE("ld %04x %p %08x %08x",
                               UtilVmRead(VmcsField::kGuestLdtrSelector),
                               UtilVmRead(VmcsField::kGuestLdtrBase),
                               UtilVmRead(VmcsField::kGuestLdtrLimit),
                               UtilVmRead(VmcsField::kGuestLdtrArBytes));
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "tr %04x %p %08x %08x", UtilVmRead(VmcsField::kGuestTrSelector),
      UtilVmRead(VmcsField::kGuestTrBase), UtilVmRead(VmcsField::kGuestTrLimit),
      UtilVmRead(VmcsField::kGuestTrArBytes));
}
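Each log call above reads the same four VMCS fields per segment register: selector, base, limit, and access-rights bytes. A minimal refactoring sketch, reusing only the UtilVmRead and HYPERPLATFORM_LOG_DEBUG_SAFE interfaces shown above; the helper name VmmpDumpOneSelector is hypothetical:

// Hypothetical helper: logs one guest segment register from the VMCS in the
// same "selector base limit access-rights" format used above.
static void VmmpDumpOneSelector(const char *name, VmcsField selector,
                                VmcsField base, VmcsField limit,
                                VmcsField ar_bytes) {
  HYPERPLATFORM_LOG_DEBUG_SAFE("%s %04x %p %08x %08x", name,
                               UtilVmRead(selector), UtilVmRead(base),
                               UtilVmRead(limit), UtilVmRead(ar_bytes));
}

// Usage equivalent to the first log call above:
// VmmpDumpOneSelector("es", VmcsField::kGuestEsSelector,
//                     VmcsField::kGuestEsBase, VmcsField::kGuestEsLimit,
//                     VmcsField::kGuestEsArBytes);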
Example #2
// Deal with EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va = exit_qualification.fields.valid_guest_linear_address
                            ? UtilVmRead(VmcsField::kGuestLinearAddress)
                            : 0;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptAll();
      return;
    }
  }
  HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                               fault_pa);
}
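The handler above tests bit fields of the exit qualification reported for EPT violations. A sketch of a union consistent with the fields referenced here and in the later examples, following the Intel SDM layout of the EPT-violation exit qualification; treat the exact bit positions as an assumption rather than a copy of the project's header:

union EptViolationQualification {
  ULONG64 all;
  struct {
    ULONG64 read_access : 1;                   // bit 0: access was a data read
    ULONG64 write_access : 1;                  // bit 1: access was a data write
    ULONG64 execute_access : 1;                // bit 2: access was an instruction fetch
    ULONG64 ept_readable : 1;                  // bit 3: page was readable in the EPT
    ULONG64 ept_writeable : 1;                 // bit 4: page was writable in the EPT
    ULONG64 ept_executable : 1;                // bit 5: page was executable in the EPT
    ULONG64 ept_executable_for_user_mode : 1;  // bit 6
    ULONG64 valid_guest_linear_address : 1;    // bit 7
    ULONG64 caused_by_translation : 1;         // bit 8
    ULONG64 reserved : 55;
  } fields;
};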
Example #3
// Walks the EPT from the PML4 down to the PD level and marks every table it
// reaches as accessible, with write access controlled by the caller.
_Use_decl_annotations_ void EptpEnumerateEpt(EptData *ept_data01,
                                             EptCommonEntry *pml4_table,
                                             bool writable) {
  EptCommonEntry *pdptr_table = nullptr;
  EptCommonEntry *pdt_table = nullptr;
  ULONG level4 = 0, level3 = 0, level2 = 0, level1 = 0;

  HYPERPLATFORM_LOG_DEBUG_SAFE("EptpEnumerateEpt start");

  if (!pml4_table) {
    return;
  }

  // PML4
  for (int i = 0; i < 512 && pml4_table; i++, pml4_table++, level4++) {
    const auto pdptr_entry_pa =
        (ULONG64)EptpGetNextLevelTablePhysicalBase(ept_data01, pml4_table);
    if (!pdptr_entry_pa) {
      break;
    }
    pdptr_table = (EptCommonEntry *)UtilVaFromPa(pdptr_entry_pa);
    // PDPT
    for (int j = 0; j < 512 && pdptr_table; j++, pdptr_table++, level3++) {
      EptpSetEntryAccess(ept_data01, pdptr_entry_pa, true, writable, true);
      const auto pdt_entry_pa =
          (ULONG64)EptpGetNextLevelTablePhysicalBase(ept_data01, pdptr_table);
      if (!pdt_entry_pa) {
        break;
      }
      pdt_table = (EptCommonEntry *)UtilVaFromPa(pdt_entry_pa);
      // PD
      for (int k = 0; k < 512 && pdt_table; k++, pdt_table++, level2++) {
        EptpSetEntryAccess(ept_data01, pdt_entry_pa, true, writable, true);
        const auto pt_entry_pa =
            (ULONG64)EptpGetNextLevelTablePhysicalBase(ept_data01, pdt_table);
        if (!pt_entry_pa) {
          break;
        }
        // The PT level itself is not enumerated entry by entry, so level1
        // stays at zero.
        EptpSetEntryAccess(ept_data01, pt_entry_pa, true, writable, true);
      }
    }
  }

  HYPERPLATFORM_LOG_DEBUG_SAFE("EptpEnumerateEpt end %d %d %d %d", level4,
                               level3, level2, level1);
}
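The three nested loops above visit up to 512 entries per table level. For a single guest-physical address, the entry picked at each level comes from a 9-bit slice of that address; a small sketch of the index arithmetic (the function names are hypothetical):

// 9-bit index into each EPT table level for a given guest-physical address
// (512 entries per table, 4 KB pages at the bottom level).
constexpr ULONG64 EptPml4Index(ULONG64 pa) { return (pa >> 39) & 0x1ff; }
constexpr ULONG64 EptPdptIndex(ULONG64 pa) { return (pa >> 30) & 0x1ff; }
constexpr ULONG64 EptPdIndex(ULONG64 pa) { return (pa >> 21) & 0x1ff; }
constexpr ULONG64 EptPtIndex(ULONG64 pa) { return (pa >> 12) & 0x1ff; }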
Example #4
// VMCALL
_Use_decl_annotations_ static void VmmpHandleVmCall(
    GuestContext *guest_context) {
  // VMCALL for Sushi expects that cx holds a command number and that dx
  // optionally holds the address of a context parameter
  const auto hypercall_number = guest_context->gp_regs->cx;
  const auto context = reinterpret_cast<void *>(guest_context->gp_regs->dx);

  if (hypercall_number == kHyperPlatformVmmBackdoorCode) {
    // Unloading requested
    HYPERPLATFORM_COMMON_DBG_BREAK();

    // The processor sets the IDT and GDT limits to ffff on VM-exit. That is
    // not the correct value, but it is normally fine to ignore because
    // VMRESUME reloads the correct values from the VMCS. Here, however, we
    // are going to skip VMRESUME and simply return to where VMCALL was
    // executed. Keeping those broken values ends up with bug check 0x109, so
    // we have to fix them manually.
    const auto gdt_limit = UtilVmRead(VmcsField::kGuestGdtrLimit);
    const auto gdt_base = UtilVmRead(VmcsField::kGuestGdtrBase);
    const auto idt_limit = UtilVmRead(VmcsField::kGuestIdtrLimit);
    const auto idt_base = UtilVmRead(VmcsField::kGuestIdtrBase);
    Gdtr gdtr = {static_cast<USHORT>(gdt_limit), gdt_base};
    Idtr idtr = {static_cast<USHORT>(idt_limit), idt_base};
    __lgdt(&gdtr);
    __lidt(&idtr);

    // Store an address of the management structure to the context parameter
    const auto result_ptr = reinterpret_cast<ProcessorData **>(context);
    *result_ptr = guest_context->stack->processor_data;
    HYPERPLATFORM_LOG_DEBUG_SAFE("Context at %p %p", context,
                                 guest_context->stack->processor_data);

    // Set rip to the next instruction of VMCALL
    const auto exit_instruction_length =
        UtilVmRead(VmcsField::kVmExitInstructionLen);
    const auto return_address = guest_context->ip + exit_instruction_length;

    // Since RFLAGS is overwritten after VMXOFF, we manually indicate that
    // VMCALL was successful by clearing CF and ZF.
    guest_context->flag_reg.fields.cf = false;
    guest_context->flag_reg.fields.zf = false;

    // Set registers used after VMXOFF to recover the context. Volatile
    // registers must be used because only changes to them are reflected in
    // the guest's context after VMXOFF.
    guest_context->gp_regs->cx = return_address;
    guest_context->gp_regs->dx = guest_context->gp_regs->sp;
    guest_context->gp_regs->ax = guest_context->flag_reg.all;
    guest_context->vm_continue = false;

    UtilInveptAll();

  } else {
    // Unsupported hypercall. Handle like other VMX instructions
    VmmpHandleVmx(guest_context);
  }
}
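The handler expects the guest to pass the command number in cx and, optionally, a context pointer in dx, and it writes the ProcessorData pointer back through that context before turning VMX off. A guest-side sketch of issuing this backdoor call, assuming an assembly stub (here called AsmVmxCall) that moves its first argument into rcx and its second into rdx before executing VMCALL; the stub name and signature are assumptions, not taken from the code above:

// Hypothetical guest-side wrapper around the backdoor VMCALL handled above.
EXTERN_C void AsmVmxCall(ULONG_PTR hypercall_number, void *context);

static ProcessorData *RequestVmmBackdoor() {
  ProcessorData *processor_data = nullptr;
  // cx = command number, dx = address of the context parameter, exactly what
  // VmmpHandleVmCall above expects; the handler stores the per-processor
  // management structure through this pointer.
  AsmVmxCall(kHyperPlatformVmmBackdoorCode, &processor_data);
  return processor_data;
}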
Example #5
// Deal with L2 EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolationEx(EptData *ept_data,
                                                    EptData *ept_data02,
                                                    ULONG_PTR guest_pa,
                                                    bool is_range_of_ept12) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  // Use the supplied guest physical address if any; otherwise read it from
  // the VMCS.
  ULONG_PTR fault_pa = 0;
  if (!guest_pa) {
    fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  } else {
    fault_pa = guest_pa;
  }

  const auto fault_va = reinterpret_cast<void *>(
      exit_qualification.fields.valid_guest_linear_address
          ? UtilVmRead(VmcsField::kGuestLinearAddress)
          : 0);

  // GuestPhysicalAddress is the guest physical address of an EPT1-2 entry;
  // writes to it were disabled when L2 was first initialized.
  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    const auto ept_entry = EptGetEptPtEntry(ept_data, fault_pa);
    if (!ept_entry || !ept_entry->all) {
      // EPT entry miss. It should be device memory.
      HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();

      if (!IsReleaseBuild()) {
        NT_VERIFY(EptpIsDeviceMemory(fault_pa));
      }
      EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

      UtilInveptGlobal();
      return;
    }
  }

  if (!exit_qualification.fields.ept_writeable && is_range_of_ept12) {
    const auto ept01_pte = EptGetEptPtEntry(
        ept_data, UtilVmRead64(VmcsField::kGuestPhysicalAddress));
    if (ept01_pte) {
      const auto entry = (EptCommonEntry *)UtilVaFromPa(
          UtilVmRead64(VmcsField::kGuestPhysicalAddress));
      ept01_pte->fields.write_access = true;
      HYPERPLATFORM_LOG_DEBUG_SAFE(
          "Faced a non-writable address but it is readable: %p %p",
          UtilVmRead64(VmcsField::kGuestPhysicalAddress),
          entry->fields.physial_address);
      UtilInveptGlobal();
    }
  }
}
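The code above toggles Ept01Pte->fields.write_access and logs entry->fields.physial_address. A sketch of an EPT entry union consistent with those member accesses and with the Intel EPT page-table-entry format; the exact bit widths are an assumption, not a copy of the project's header (note that physial_address keeps the spelling used by the code above):

union EptCommonEntry {
  ULONG64 all;
  struct {
    ULONG64 read_access : 1;       // bit 0
    ULONG64 write_access : 1;      // bit 1
    ULONG64 execute_access : 1;    // bit 2
    ULONG64 memory_type : 3;       // bits 3-5 (leaf entries only)
    ULONG64 reserved1 : 6;
    ULONG64 physial_address : 36;  // bits 12-47: page frame number
    ULONG64 reserved2 : 16;
  } fields;
};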
Example #6
// Deal with EPT violation VM-exit.
_Use_decl_annotations_ void EptHandleEptViolation(EptData *ept_data) {
  const EptViolationQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto fault_pa = UtilVmRead64(VmcsField::kGuestPhysicalAddress);
  const auto fault_va =
      exit_qualification.fields.valid_guest_linear_address
          ? reinterpret_cast<void *>(UtilVmRead(VmcsField::kGuestLinearAddress))
          : nullptr;

  if (!exit_qualification.fields.ept_readable &&
      !exit_qualification.fields.ept_writeable &&
      !exit_qualification.fields.ept_executable) {
    // EPT entry miss. It should be device memory.
    HYPERPLATFORM_PERFORMANCE_MEASURE_THIS_SCOPE();
    // HYPERPLATFORM_LOG_DEBUG_SAFE(
    //    "[INIT] Dev VA = %p, PA = %016llx, Used = %d",
    //    0, fault_pa, ept_data->preallocated_entries_count);

    if (!IsReleaseBuild()) {
      const auto is_device_memory = EptpIsDeviceMemory(fault_pa);
      NT_ASSERT(is_device_memory);
      UNREFERENCED_PARAMETER(is_device_memory);
    }

    // There is a race condition here. If multiple processors reach this code
    // with the same fault_pa, this function may create multiple EPT entries
    // for one physical address, leading to a memory leak. This call should
    // probably be guarded by a spin-lock, but it is not yet, simply because
    // the impact is so small.
    EptpConstructTables(ept_data->ept_pml4, 4, fault_pa, ept_data);

    UtilInveptAll();
  } else if (exit_qualification.fields.caused_by_translation &&
             exit_qualification.fields.execute_access &&
             !exit_qualification.fields.ept_executable) {
    const auto ept_pt_entry = EptGetEptPtEntry(ept_data->ept_pml4, 4, fault_pa);

    MmoneptHandleDodgyRegionExecution(ept_data->hs_ept_data, ept_pt_entry,
                                      fault_pa, fault_va);
  } else {
    HYPERPLATFORM_LOG_DEBUG_SAFE("[IGNR] OTH VA = %p, PA = %016llx", fault_va,
                                 fault_pa);
  }
}
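The race-condition comment above suggests guarding EptpConstructTables. A minimal sketch of such a guard using a bare interlocked flag, which avoids the regular spin-lock APIs because this code runs in VMX root mode; the wrapper and the flag name are hypothetical, and a complete fix would also re-check the EPT entry after acquiring the lock:

// Hypothetical serialization wrapper for EptpConstructTables.
static volatile LONG g_eptp_construct_lock = 0;

static void EptpConstructTablesSerialized(EptCommonEntry *pml4, ULONG level,
                                          ULONG64 fault_pa,
                                          EptData *ept_data) {
  // Spin until no other processor holds the flag.
  while (InterlockedCompareExchange(&g_eptp_construct_lock, 1, 0) != 0) {
    YieldProcessor();
  }
  EptpConstructTables(pml4, level, fault_pa, ept_data);
  InterlockedExchange(&g_eptp_construct_lock, 0);
}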
Example #7
// IN, INS, OUT, OUTS
_Use_decl_annotations_ static void VmmpHandleIoPort(
    GuestContext *guest_context) {
  const IoInstQualification exit_qualification = {
      UtilVmRead(VmcsField::kExitQualification)};

  const auto is_in = exit_qualification.fields.direction == 1;  // to memory?
  const auto is_string = exit_qualification.fields.string_instruction == 1;
  const auto is_rep = exit_qualification.fields.rep_prefixed == 1;
  const auto port = static_cast<USHORT>(exit_qualification.fields.port_number);
  const auto string_address = reinterpret_cast<void *>(
      (is_in) ? guest_context->gp_regs->di : guest_context->gp_regs->si);
  const auto count =
      static_cast<unsigned long>((is_rep) ? guest_context->gp_regs->cx : 1);
  const auto address =
      (is_string) ? string_address : &guest_context->gp_regs->ax;

  SIZE_T size_of_access = 0;
  switch (static_cast<IoInstSizeOfAccess>(
      exit_qualification.fields.size_of_access)) {
    case IoInstSizeOfAccess::k1Byte:
      size_of_access = 1;
      break;
    case IoInstSizeOfAccess::k2Byte:
      size_of_access = 2;
      break;
    case IoInstSizeOfAccess::k4Byte:
      size_of_access = 4;
      break;
  }

  HYPERPLATFORM_LOG_DEBUG_SAFE("GuestIp= %p, Port= %04x, %s%s",
                               guest_context->ip, port, (is_in ? "IN" : "OUT"),
                               (is_string ? "S" : ""));

  VmmpIoWrapper(is_in, is_string, size_of_access, port, address, count);
  // FIXME: Guest's ECX should be updated when is_rep == 1, and EDI and ESI
  // when is_string == 1 (a hedged fix-up sketch follows this function).

  VmmpAdjustGuestInstructionPointer(guest_context->ip);
}
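The FIXME above notes that the guest's registers are not adjusted for REP-prefixed and string forms. A hedged sketch of that fix-up, meant to run right after VmmpIoWrapper and assuming the guest's direction flag is clear (DF = 0); a complete fix would also consult RFLAGS.DF and decrement instead:

  if (is_string) {
    // INS writes through ES:DI, OUTS reads through DS:SI; advance by the
    // number of bytes transferred.
    const auto advance = size_of_access * count;
    if (is_in) {
      guest_context->gp_regs->di += advance;
    } else {
      guest_context->gp_regs->si += advance;
    }
  }
  if (is_rep) {
    // The wrapper executed the whole repetition, so the count is exhausted.
    guest_context->gp_regs->cx = 0;
  }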
Example #8
// Checks if the current process's token matches one of the system tokens. If
// so, queues a work item to terminate the process.
_Use_decl_annotations_ void EopmonCheckCurrentProcessToken() {
  // PLACE TO IMPROVE:
  // This check is performed only when CR3 is changed. While that seems
  // frequent enough to detect an escalated process before it does anything
  // meaningful, there is still a window in which an exploit can run with
  // SYSTEM privileges for a short while. To close this gap, it may be worth
  // performing this check more often. For example, setting 0xCC at a SYSENTER
  // handler, handling #BP in the hypervisor, and calling this function from
  // there would be more frequent (although it may slow down the system more).

  // Ignore when IRQL is higher than DISPATCH_LEVEL. EopMon could schedule a
  // DPC that queues a work item when an elevated process is found, but that
  // is not implemented for simplicity.
  if (KeGetCurrentIrql() > DISPATCH_LEVEL) {
    return;
  }

  const auto& system_tokens = *g_eopmonp_system_process_tokens;
  const auto& system_process_ids = *g_eopmonp_system_process_ids;
  auto& being_killed_pids = g_eopmonp_processes_being_killed;

  const auto process = PsGetCurrentProcess();
  const auto pid = PsGetProcessId(process);

  // Is it a known, safe process?
  for (auto system_pid : system_process_ids) {
    if (pid == system_pid) {
      // Yes, it is. This process is ok.
      return;
    }
  }

  // Is its token one of those of the system processes?
  const char* system_process_name = nullptr;
  const auto token = EopmonpGetProcessToken(process);
  for (auto& system_token : system_tokens) {
    if (token == system_token.first) {
      system_process_name = system_token.second;
      break;
    }
  }
  if (!system_process_name) {
    // No, it is not. This process is ok.
    return;
  }

  // Is this PID already queued for termination?
  for (auto pid_being_killed : being_killed_pids) {
    if (pid == pid_being_killed) {
      // Yes, it is. Nothing to do.
      return;
    }
  }

  // We have found a process using the same system token as one of the system
  // processes. Let us terminate the process.

  // PLACE TO IMPROVE:
  // It would be better to issue a bug check rather than kill the process,
  // because the system has already been exploited and could be in a somewhat
  // unstable condition. For example, the HalDispatchTable might have already
  // been modified, and the author found that running Necurs's exploit
  // (CVE-2015-0057) multiple times led to a bug check. For this reason, it is
  // worth considering dying spectacularly rather than giving a (potentially)
  // false sense of security, or it may also be an option to suspend the
  // process if you are going to examine exactly how it performed the EoP.

  // HYPERPLATFORM_COMMON_DBG_BREAK();

  // Remember this PID as one already queued for termination
  for (auto& pid_being_killed : being_killed_pids) {
    if (!pid_being_killed) {
      pid_being_killed = pid;
      break;
    }
  }

  // Allocate and queue a work queue item for delayed termination
  const auto context = reinterpret_cast<EopmonWorkQueueItem*>(
      ExAllocatePoolWithTag(NonPagedPool, sizeof(EopmonWorkQueueItem),
                            kHyperPlatformCommonPoolTag));
  if (!context) {
    HYPERPLATFORM_LOG_WARN_SAFE("Memory allocation failure.");
    return;
  }
  ExInitializeWorkItem(&context->work_item,
                       EopmonpTerminateProcessWorkerRoutine, context);
  context->dodgy_pid = pid;
  context->system_process_name = system_process_name;
  ExQueueWorkItem(&context->work_item, CriticalWorkQueue);
  HYPERPLATFORM_LOG_DEBUG_SAFE(
      "Exploitation detected. Process %Iu has been queued for termination. "
      "Stolen token %p from %s",
      pid, token, system_process_name);
}