Esempio n. 1
1
void
GetGuestState()
{
    PHYSICAL_ADDRESS HighestAcceptableAddress;
    HighestAcceptableAddress.QuadPart = 0xFFFFFFFF00000000;

    g_GuestState.CR0 = __readcr0();
    g_GuestState.CR3 = __readcr3();
    g_GuestState.CR4 = __readcr4() | CR4_VMXE;
    g_GuestState.RFLAGS = __readeflags();

    g_GuestState.Cs = __readcs();
    g_GuestState.Ds = __readds();
    g_GuestState.Es = __reades();
    g_GuestState.Ss = __readss();
    g_GuestState.Fs = __readfs();
    g_GuestState.Gs = __readgs();
    g_GuestState.Ldtr = __sldt();
    g_GuestState.Tr = __str();

    __sgdt(&(g_GuestState.Gdtr));
    __sidt(&(g_GuestState.Idtr));

    g_GuestState.S_CS = __readmsr(IA32_SYSENTER_CS);
    g_GuestState.SEIP = __readmsr(IA64_SYSENTER_EIP);
    g_GuestState.SESP = __readmsr(IA32_SYSENTER_ESP);

    g_GuestState.VMXON = MmAllocateNonCachedMemory(PAGE_SIZE);
    RtlZeroMemory(g_GuestState.VMXON, PAGE_SIZE);

    g_GuestState.VMCS  = MmAllocateNonCachedMemory(PAGE_SIZE);
    RtlZeroMemory(g_GuestState.VMCS,  PAGE_SIZE);

    g_GuestState.hvStack =        // 分配的是非页面内存, 且保证在物理内存中是连续的, MmFreeContiguousMemory
        MmAllocateContiguousMemory(PAGE_SIZE * 2, HighestAcceptableAddress);
    RtlZeroMemory(g_GuestState.hvStack, PAGE_SIZE * 2);
}
Esempio n. 2
0
/// <summary>
/// Query VMX capability MSRs for EPT, VPID and execute-only translation support
/// </summary>
/// <returns>STATUS_SUCCESS if everything required is present, STATUS_NOT_SUPPORTED otherwise</returns>
NTSTATUS EptQuerySupport()
{
    IA32_VMX_PROCBASED_CTLS_MSR procCtl = { 0 };
    IA32_VMX_PROCBASED_CTLS2_MSR procCtl2 = { 0 };
    IA32_VMX_EPT_VPID_CAP_MSR eptVpidCap = { 0 };

    // Secondary processor-based controls must exist before CTLS2 is meaningful
    procCtl.All = __readmsr( MSR_IA32_VMX_PROCBASED_CTLS );
    if (!procCtl.Fields.ActivateSecondaryControls)
    {
        DPRINT( "HyperBone: CPU %d: %s: No secondary contol support\n", CPU_IDX, __FUNCTION__ );
        return STATUS_NOT_SUPPORTED;
    }

    // Both EPT and VPID are required
    procCtl2.All = __readmsr( MSR_IA32_VMX_PROCBASED_CTLS2 );
    if (!procCtl2.Fields.EnableEPT || !procCtl2.Fields.EnableVPID)
    {
        DPRINT( "HyperBone: CPU %d: %s: No EPT/VPID support\n", CPU_IDX, __FUNCTION__ );
        return STATUS_NOT_SUPPORTED;
    }

    // Both bits are known nonzero at this point
    g_Data->EPTSupported = TRUE;
    g_Data->VPIDSpported = TRUE;

    // Execute-only EPT translations are needed for stealth hooks
    eptVpidCap.All = __readmsr( MSR_IA32_VMX_EPT_VPID_CAP );
    if (!eptVpidCap.Fields.ExecuteOnly)
    {
        DPRINT( "HyperBone: CPU %d: %s: No execute-only EPT translation support\n", CPU_IDX, __FUNCTION__ );
        return STATUS_NOT_SUPPORTED;
    }

    g_Data->EPTExecOnlySupported = TRUE;
    return STATUS_SUCCESS;
}
Esempio n. 3
0
// Puts the processor into VMX root operation: forces CR0/CR4 into the
// ranges required by the IA32_VMX_CR{0,4}_FIXED{0,1} MSRs, stamps the VMXON
// region with the VMCS revision identifier, then executes VMXON.
// Returns false if VMXON fails.
_Use_decl_annotations_ EXTERN_C static bool VminitpEnterVmxMode(
    PER_PROCESSOR_DATA *ProcessorData) {
  // FIXED1 bits may be 1, FIXED0 bits must be 1.
  const CR0_REG cr0Allowed1 = {__readmsr(IA32_VMX_CR0_FIXED1)};
  const CR0_REG cr0Required = {__readmsr(IA32_VMX_CR0_FIXED0)};
  CR0_REG newCr0 = {__readcr0()};
  newCr0.All = (newCr0.All & cr0Allowed1.All) | cr0Required.All;
  __writecr0(newCr0.All);

  const CR4_REG cr4Allowed1 = {__readmsr(IA32_VMX_CR4_FIXED1)};
  const CR4_REG cr4Required = {__readmsr(IA32_VMX_CR4_FIXED0)};
  CR4_REG newCr4 = {__readcr4()};
  newCr4.All = (newCr4.All & cr4Allowed1.All) | cr4Required.All;
  __writecr4(newCr4.All);

  // The VMXON region must begin with the VMCS revision identifier.
  const IA32_VMX_BASIC_MSR basicMsr = {__readmsr(IA32_VMX_BASIC)};
  ProcessorData->VmxonRegion->RevisionIdentifier =
      basicMsr.Fields.RevisionIdentifier;

  // VMXON takes the physical address of the region; nonzero means failure.
  auto regionPA = MmGetPhysicalAddress(ProcessorData->VmxonRegion);
  return __vmx_on(reinterpret_cast<unsigned long long *>(&regionPA.QuadPart)) ==
         0;
}
Esempio n. 4
0
// Reports whether this processor can enter VMX operation: VMX must be
// advertised by CPUID, the VMCS memory type must be write-back, and the
// IA32_FEATURE_CONTROL MSR must be locked with VMXON enabled.
_Use_decl_annotations_ EXTERN_C static bool VminitpIsVmxAvailable() {
  // CPUID.1:ECX.VMX[bit 5] reports basic VMX support.
  int regs[4] = {};
  __cpuid(regs, 1);
  const CPU_FEATURES_ECX features = {static_cast<ULONG_PTR>(regs[2])};
  if (!features.Fields.VMX) {
    LOG_ERROR("VMX features are not supported.");
    return false;
  }

  // Only the write-back (6) VMCS memory type is handled here.
  const IA32_VMX_BASIC_MSR basicMsr = {__readmsr(IA32_VMX_BASIC)};
  if (basicMsr.Fields.MemoryType != 6) {
    LOG_ERROR("Write-back cache type is not supported.");
    return false;
  }

  // Firmware must have locked the feature-control MSR with VMXON allowed.
  const IA32_FEATURE_CONTROL_MSR featureControl = {
      __readmsr(IA32_FEATURE_CONTROL)};
  const bool enabled =
      featureControl.Fields.Lock && featureControl.Fields.EnableVmxon;
  if (!enabled) {
    LOG_ERROR("VMX features are not enabled.");
  }
  return enabled;
}
Esempio n. 5
0
File: vcpu.c Progetto: HideSand/ksm
static inline bool enter_vmx(struct vmcs *vmxon)
{
	/* If we're running nested on a hypervisor that does not
	 * support VT-x, this will cause #GP.  */
	u64 cr0 = __readcr0();
	cr0 &= __readmsr(MSR_IA32_VMX_CR0_FIXED1);
	cr0 |= __readmsr(MSR_IA32_VMX_CR0_FIXED0);
	__writecr0(cr0);

	u64 cr4 = __readcr4();
	cr4 &= __readmsr(MSR_IA32_VMX_CR4_FIXED1);
	cr4 |= __readmsr(MSR_IA32_VMX_CR4_FIXED0);
	__writecr4(cr4);

	u64 vmx = __readmsr(MSR_IA32_VMX_BASIC);
	vmxon->revision_id = (u32)vmx;

	/* Enter VMX root operation  */
	uintptr_t pa = __pa(vmxon);
	if (__vmx_on(&pa))
		return false;

	/* This is necessary here or just before we exit the VM,
	 * we do it here as it's easier.  */
	__invept_all();
	return true;
}
Esempio n. 6
0
/*
 * Fill a KSEC_MACHINE_SPECIFIC_COUNTERS structure with hardware counters
 * (TSC, event-counter MSRs, performance counters) used as entropy sources.
 */
VOID
NTAPI
KsecReadMachineSpecificCounters(
    _Out_ PKSEC_MACHINE_SPECIFIC_COUNTERS MachineSpecificCounters)
{
#if defined(_M_IX86) || defined(_M_AMD64)
    /* Check if RDTSC is available */
    if (ExIsProcessorFeaturePresent(PF_RDTSC_INSTRUCTION_AVAILABLE))
    {
        /* Read the TSC value */
        MachineSpecificCounters->Tsc = __rdtsc();
    }

    /* Read the CPU event counter MSRs.
       NOTE(review): MSRs 0x12/0x13 are P5-era event counters; reading them
       is done unconditionally here -- confirm this cannot #GP on CPUs that
       do not implement them. */
    MachineSpecificCounters->Ctr0 = __readmsr(0x12);
    MachineSpecificCounters->Ctr1 = __readmsr(0x13);

    /* Check if this is an MMX capable CPU */
    if (ExIsProcessorFeaturePresent(PF_MMX_INSTRUCTIONS_AVAILABLE))
    {
        /* Read the CPU performance counters 0 and 1 */
        MachineSpecificCounters->Pmc0 = __readpmc(0);
        MachineSpecificCounters->Pmc1 = __readpmc(1);
    }
#else
    #error Implement me!
#endif
}
Esempio n. 7
0
File: ksm.c Progetto: HideSand/ksm
/* Ensure the IA32_FEATURE_CONTROL lock bit is set.  If firmware already
 * locked the MSR we are done; otherwise try to set the bit ourselves and
 * re-read to confirm the write stuck (it may be silently refused).  */
static NTSTATUS set_lock_bit(void)
{
	uintptr_t ctl = __readmsr(MSR_IA32_FEATURE_CONTROL);
	if (!(ctl & FEATURE_CONTROL_LOCKED)) {
		__writemsr(MSR_IA32_FEATURE_CONTROL, ctl | FEATURE_CONTROL_LOCKED);
		ctl = __readmsr(MSR_IA32_FEATURE_CONTROL);
	}

	return (ctl & FEATURE_CONTROL_LOCKED) ? STATUS_SUCCESS :
						STATUS_HV_ACCESS_DENIED;
}
Esempio n. 8
0
// Driver entry point: initializes logging, builds a SYSENTER/SYSCALL
// trampoline in executable nonpaged pool, and hooks IA32_LSTAR on every
// processor so the trampoline forwards to the original handler.
_Use_decl_annotations_ EXTERN_C NTSTATUS
DriverEntry(PDRIVER_OBJECT DriverObject, PUNICODE_STRING RegistryPath) {
  UNREFERENCED_PARAMETER(RegistryPath);
  PAGED_CODE();

  DriverObject->DriverUnload = DriverUnload;

  DBG_BREAK();

  const auto status = LogInitialization(LOG_LEVEL, nullptr);
  if (!NT_SUCCESS(status)) {
    return status;
  }

  // Trampoline layout (jumps to the current IA32_LSTAR target):
  //
  // FF 25 00 00 00 00                       jmp     cs:jmp_address
  // FF FF FF FF FF FF FF FF jmp_address     dq 0FFFFFFFFFFFFFFFFh
  const JMP_CODE trampolineCode = {{0xff, 0x25}, __readmsr(IA32_LSTAR)};

  // Must be executable pool since the CPU will fetch instructions from it.
  g_Trampoline = reinterpret_cast<UCHAR*>(ExAllocatePoolWithTag(
      NonPagedPoolExecute, sizeof(trampolineCode), POOL_TAG_NAME));
  if (!g_Trampoline) {
    LogTermination();
    return STATUS_MEMORY_NOT_ALLOCATED;
  }
  RtlCopyMemory(g_Trampoline, &trampolineCode, sizeof(trampolineCode));

  // Redirect IA32_LSTAR to the trampoline on each processor.
  UtilForEachProcessor(MsrHookCallback, nullptr);
  return status;
}
Esempio n. 9
0
/*
 * Determine the performance-counter frequency. Under Hyper-V the TSC
 * frequency can be read directly from a synthetic MSR; on bare metal (or
 * other hypervisors) it is measured instead.
 */
NTSTATUS
BlpTimeCalibratePerformanceCounter (
    VOID
    )
{
    INT CpuInfo[4];

    /* Check if the ISVM bit is set, meaning we're in a hypervisor */
    __cpuid(CpuInfo, 1);
    if (CpuInfo[2] & 0x80000000)
    {
        /* Get the Hypervisor Identification Leaf */
        __cpuid(CpuInfo, 0x40000001);

        /* Is this Hyper-V? Multi-char constant '1#vH' is "Hv#1" when the
           EAX register value is viewed as little-endian bytes. */
        if (CpuInfo[0] == '1#vH')
        {
            /* Get the Hypervisor Feature Identification Leaf */
            __cpuid(CpuInfo, 0x40000003);

            /* Check if HV_X64_MSR_REFERENCE_TSC is present */
            if (CpuInfo[3] & 0x100)
            {
                /* Read the TSC frequency from the synthetic MSR 0x40000022 */
                BlpTimePerformanceFrequency = __readmsr(0x40000022);
                EfiPrintf(L"Using Hyper-V frequency as: %I64d\r\n", BlpTimePerformanceFrequency);
                return STATUS_SUCCESS;
            }
        }
    }

    /* On other systems, compute it */
    return BlpTimeMeasureTscFrequency();
}
Esempio n. 10
0
File: ksm.c Progetto: HideSand/ksm
/*
 * Global KSM initialization: verify hardware prerequisites, register a CPU
 * hotplug callback, build the MSR bitmap, then virtualize every processor
 * via DPC.  Returns an HV_* status on unsupported hardware.
 */
NTSTATUS ksm_init(void)
{
	NTSTATUS status;
#ifndef DBG
	/*  This prevents loading in a nested environment.
	    NOTE(review): bit 16 of CPUID.1:ECX is checked here -- VMX is
	    bit 5 and the hypervisor-present bit is 31; confirm the intended
	    feature bit.  */
	int info[4];
	__cpuid(info, 1);
	if (!(info[2] & (1 << 16)))
		return STATUS_HV_CPUID_FEATURE_VALIDATION_ERROR;

	/* CR4.VMXE already set means someone else is using VMX.  */
	if (__readcr4() & X86_CR4_VMXE)
		return STATUS_HV_FEATURE_UNAVAILABLE;
#endif

	if (!ept_check_capabilitiy())
		return STATUS_HV_FEATURE_UNAVAILABLE;

	/* Firmware must allow VMXON outside SMX.  */
	if (!(__readmsr(MSR_IA32_FEATURE_CONTROL) & FEATURE_CONTROL_VMXON_ENABLED_OUTSIDE_SMX))
		return STATUS_HV_FEATURE_UNAVAILABLE;

	/* Track processors that come online later.  */
	ksm.hotplug_cpu = KeRegisterProcessorChangeCallback(ksm_hotplug_cpu, &status, 0);
	if (!ksm.hotplug_cpu)
		return status;

	status = init_msr_bitmap(&ksm);
	if (!NT_SUCCESS(status))
		return status;

	/* Caller cr3 (could be user)  */
	ksm.origin_cr3 = __readcr3();
	ksm_init_phi_list();

	/* Run __call_init on every CPU at DISPATCH_LEVEL.  */
	STATIC_CALL_DPC(__call_init, &ksm);
	return STATIC_DPC_RET();
}
Esempio n. 11
0
// __asm__ blocks are only checked for inline functions that end up being
// emitted, so call functions with __asm__ blocks to make sure their inline
// assembly parses.
void f() {
  // rep movs* string-copy intrinsics.
  __movsb(0, 0, 0);
  __movsd(0, 0, 0);
  __movsw(0, 0, 0);

  // rep stos* string-store intrinsics.
  __stosd(0, 0, 0);
  __stosw(0, 0, 0);

  // 64-bit-only variants.
#ifdef _M_X64
  __movsq(0, 0, 0);
  __stosq(0, 0, 0);
#endif

  // Miscellaneous system intrinsics.
  int info[4];
  __cpuid(info, 0);
  __cpuidex(info, 0, 0);
  _xgetbv(0);
  __halt();
  __nop();
  __readmsr(0);

  // FIXME: Call these in 64-bit too once the intrinsics have been fixed to
  // work there, PR19301
#ifndef _M_X64
  __readcr3();
  __writecr3(0);
#endif

  // ARM memory barrier intrinsic.
#ifdef _M_ARM
  __dmb(_ARM_BARRIER_ISHST);
#endif
}
Esempio n. 12
0
// Prepares the per-processor VMCS: stamps it with the VMCS revision
// identifier, clears it (launch state becomes "clear"), and makes it the
// current VMCS via VMPTRLD.  Returns false on any VMX instruction failure.
_Use_decl_annotations_ EXTERN_C static bool VminitpInitializeVMCS(
    PER_PROCESSOR_DATA *ProcessorData) {
  // The first dword of the VMCS must hold the revision id from
  // IA32_VMX_BASIC.
  const IA32_VMX_BASIC_MSR basicMsr = {__readmsr(IA32_VMX_BASIC)};
  ProcessorData->VmcsRegion->RevisionIdentifier =
      basicMsr.Fields.RevisionIdentifier;

  auto regionPA = MmGetPhysicalAddress(ProcessorData->VmcsRegion);
  auto pa = reinterpret_cast<unsigned long long *>(&regionPA.QuadPart);

  // VMCLEAR then VMPTRLD; both return nonzero on failure.  After VMCLEAR
  // the current-VMCS pointer is FFFFFFFF_FFFFFFFFH until VMPTRLD loads it.
  return __vmx_vmclear(pa) == 0 && __vmx_vmptrld(pa) == 0;
}
ULONGLONG GetKeServiceDescriptorTableShadow64()
{
#if 1
    PUCHAR StartSearchAddress = (PUCHAR)__readmsr(0xC0000082);
    PUCHAR EndSearchAddress = StartSearchAddress + 0x500;
    PUCHAR i = NULL;
    UCHAR b1=0,b2=0,b3=0;
    ULONG templong=0;
    ULONGLONG addr=0;
#if DBG
    //SetSoftBreakPoint();
#endif 

    for(i=StartSearchAddress;i<EndSearchAddress;i++)
    {
        if( MmIsAddressValid(i) && MmIsAddressValid(i+1) && MmIsAddressValid(i+2) )
        {
            b1=*i;
            b2=*(i+1);
            b3=*(i+2);
            if( b1==0x4c && b2==0x8d && b3==0x1d ) //4c8d1d
            {
                memcpy(&templong,i+3,4);
                addr = (ULONGLONG)templong + (ULONGLONG)i + 7;
                return addr;
            }
        }
    }
#endif 
    return 0;
}
Esempio n. 14
0
File: ept.c Progetto: HideSand/ksm
/*
 * Build identity EPT mappings for every physical memory range plus the
 * local APIC page.  Returns false if any page-table allocation fails.
 */
static bool setup_pml4(uintptr_t *pml4)
{
	PPHYSICAL_MEMORY_RANGE pm_ranges = MmGetPhysicalMemoryRanges();
	bool ret = false;

	/* The returned array is terminated by an all-zero entry, hence the
	 * unbounded loop with the base/bytes break below.  */
	for (int run = 0;; ++run) {
		uintptr_t base_addr = pm_ranges[run].BaseAddress.QuadPart;
		uintptr_t bytes = pm_ranges[run].NumberOfBytes.QuadPart;
		if (!base_addr || !bytes)
			break;

		/* Map each page of the range 1:1 with full access.  */
		uintptr_t nr_pages = BYTES_TO_PAGES(bytes);
		for (uintptr_t page = 0; page < nr_pages; ++page) {
			uintptr_t page_addr = base_addr + page * PAGE_SIZE;
			uintptr_t *entry = ept_alloc_page(NULL, pml4, EPT_ACCESS_ALL, page_addr);
			if (!entry)
				goto out;
		}
	}

	/* Allocate APIC page -- it lives outside the reported RAM ranges.  */
	ret = !!ept_alloc_page(NULL, pml4, EPT_ACCESS_ALL, __readmsr(MSR_IA32_APICBASE) & MSR_IA32_APICBASE_BASE);

out:
	/* MmGetPhysicalMemoryRanges allocates; caller of this helper does
	 * not own the array.  */
	ExFreePool(pm_ranges);
	return ret;
}
Esempio n. 15
0
/*
 * Program the PIT (8254) with the largest supported clock increment and
 * register that increment with the kernel.
 *
 * BUGFIX: interrupts were "saved" with __readmsr()/__writemsr(Flags) --
 * __readmsr takes a register argument and returns an MSR value, so this
 * did not compile as intended and is not how EFLAGS is saved. Use
 * __readeflags()/__writeeflags() around _disable() instead.
 */
VOID
NTAPI
HalpInitializeClock(VOID)
{
    //PKPRCB Prcb = KeGetCurrentPrcb();
    ULONG Increment;
    USHORT RollOver;
    ULONG Flags = 0;

    /* Get increment and rollover for the largest time clock ms possible */
    Increment = HalpRolloverTable[HalpLargestClockMS - 1].HighPart;
    RollOver = (USHORT)HalpRolloverTable[HalpLargestClockMS - 1].LowPart;

    /* Set the maximum and minimum increment with the kernel */
    HalpCurrentTimeIncrement = Increment;
    KeSetTimeIncrement(Increment, HalpRolloverTable[0].HighPart);

    /* Save EFLAGS (interrupt state) and disable interrupts */
    Flags = __readeflags();
    _disable();

    /* Set the rollover: mode 2 (rate generator), LSB then MSB */
    __outbyte(TIMER_CONTROL_PORT, TIMER_SC0 | TIMER_BOTH | TIMER_MD2);
    __outbyte(TIMER_DATA_PORT0, RollOver & 0xFF);
    __outbyte(TIMER_DATA_PORT0, RollOver >> 8);

    /* Restore interrupts if they were previously enabled */
    __writeeflags(Flags);

    /* Save rollover and return */
    HalpCurrentRollOver = RollOver;
}
Esempio n. 16
0
/// <summary>
/// Check whether VT-x is usable: CPUID must advertise VMX, and the
/// IA32_FEATURE_CONTROL MSR must either be unlocked (we lock it with VMXON
/// enabled ourselves) or already locked with VMXON enabled.
/// </summary>
/// <returns>TRUE if supported</returns>
BOOLEAN VmxHardSupported()
{
    CPUID data = { 0 };

    // CPUID.1:ECX bit 5 reports VMX support
    __cpuid( (int*)&data, 1 );
    if (!(data.ecx & (1 << 5)))
        return FALSE;

    IA32_FEATURE_CONTROL_MSR Control = { 0 };
    Control.All = __readmsr( MSR_IA32_FEATURE_CONTROL );

    if (Control.Fields.Lock)
    {
        // Firmware locked the MSR: VMXON must have been left enabled
        if (Control.Fields.EnableVmxon == FALSE)
        {
            DPRINT( "HyperBone: CPU %d: %s: VMX locked off in BIOS\n", CPU_IDX, __FUNCTION__ );
            return FALSE;
        }
    }
    else
    {
        // Unlocked: enable VMXON and set the lock bit ourselves
        Control.Fields.Lock = TRUE;
        Control.Fields.EnableVmxon = TRUE;
        __writemsr( MSR_IA32_FEATURE_CONTROL, Control.All );
    }

    return TRUE;
}
Esempio n. 17
0
/* Returns 0 when VT-x is present and not locked off by firmware,
 * -1 otherwise.  */
int check_vmx_support(void)
{
	int regs[4];
	uint64_t feat;

	/* CPUID leaf 1: ECX bit 5 advertises VMX.  */

	__cpuid(regs, 1);

	if (!(regs[2] & 0x00000020)) {
		return -1;
	}

	/* IA32_FEATURE_CONTROL: bit 0 = lock, bit 2 = VMXON outside SMX.
	 * Locked with VMXON disabled means VMX can never be enabled.  */

	feat = __readmsr(IA32_MSR_FEATURE_CONTROL);

	if ((feat & 0x01) && !(feat & 0x04)) {
		return -1;
	}

	return 0;
}
Esempio n. 18
0
File: ksm.c Progetto: HideSand/ksm
/*
 * Allocate and populate the VMX MSR read bitmap: intercept reads of every
 * MSR except MPERF/APERF, any MSR that faults when probed, and
 * GS_BASE/KERNEL_GS_BASE.  A set bit causes a VM exit on RDMSR.
 */
static NTSTATUS init_msr_bitmap(struct ksm *k)
{
	void *msr_bitmap = ExAllocatePool(NonPagedPoolNx, PAGE_SIZE);
	if (!msr_bitmap)
		return STATUS_NO_MEMORY;

	k->msr_bitmap = msr_bitmap;
	RtlZeroMemory(msr_bitmap, PAGE_SIZE);

	/* For all MSRs... (first 1024 bytes cover reads of 0-1fff, next
	 * 1024 cover reads of c0000000-c0001fff)  */
	u8 *bitmap_read_lo = (u8 *)msr_bitmap;
	u8 *bitmap_read_hi = bitmap_read_lo + 1024;
	memset(bitmap_read_lo, 0xff, 1024);		// 0 -> 1fff
	memset(bitmap_read_hi, 0xff, 1024);		// c0000000 - c0001fff

	/* ... ignore MSR_IA32_MPERF and MSR_IA32_APERF  */
	RTL_BITMAP bitmap_read_lo_hdr;
	RtlInitializeBitMap(&bitmap_read_lo_hdr, (PULONG)bitmap_read_lo, 1024 * CHAR_BIT);
	RtlClearBits(&bitmap_read_lo_hdr, MSR_IA32_MPERF, 2);

	/* Probe each MSR; stop intercepting those that #GP when read so the
	 * exit handler does not have to emulate faults for them.
	 * NOTE(review): the probe covers 0..0xFFF (PAGE_SIZE) while the low
	 * bitmap spans 0..0x1FFF -- confirm the intended probe range.  */
	for (u32 msr = 0; msr < PAGE_SIZE; ++msr) {
		__try {
			__readmsr(msr);
		} __except (EXCEPTION_EXECUTE_HANDLER)
		{
			RtlClearBits(&bitmap_read_lo_hdr, msr, 1);
		}
	}

	/* ... and ignore MSR_IA32_GS_BASE and MSR_IA32_KERNEL_GS_BASE
	 * (0xC0000101/0xC0000102 -> bits 0x101/0x102 of the high bitmap)  */
	RTL_BITMAP bitmap_read_hi_hdr;
	RtlInitializeBitMap(&bitmap_read_hi_hdr, (PULONG)bitmap_read_hi, 1024 * CHAR_BIT);
	RtlClearBits(&bitmap_read_hi_hdr, 0x101, 2);
	return STATUS_SUCCESS;
}
Esempio n. 19
0
VOID 
GetKeServiceDescriptorTable()
{
	PUCHAR StartSearchAddress = (PUCHAR)__readmsr(0xC0000082);
	PUCHAR EndSearchAddress = StartSearchAddress + 0x500;
	PUCHAR i = NULL;
	UCHAR b1 = 0, b2 = 0, b3 = 0;
	ULONG templong = 0;
	ULONGLONG addr = 0;

	for (i = StartSearchAddress; i<EndSearchAddress; i++)
	{
		if (MmIsAddressValid(i) && MmIsAddressValid(i + 1) && MmIsAddressValid(i + 1))
		{
			b1 = *i;
			b2 = *(i + 1);
			b3 = *(i + 2);
			if (b1 == 0x4c && b2 == 0x8d && b3 == 0x15) //4c8d15
			{
				memcpy(&templong, i + 3, 4);
				addr = (ULONGLONG)templong + (ULONGLONG)i + 7;
				KeServiceDescriptortable = addr;
				return;
			}
		}
	}

	KeServiceDescriptortable = 0;
	return;
}
Esempio n. 20
0
File: cpu.c Progetto: killvxk/NT_OS
/* Thin wrapper exposing the __readmsr compiler intrinsic as a callable
 * function; returns the 64-bit contents of the given MSR. */
LONGLONG
FASTCALL
RDMSR(IN ULONG Register)
{
    LONGLONG Value;

    Value = __readmsr(Register);
    return Value;
}
Esempio n. 21
0
/**
  Read the 64-bit value of a Machine Specific Register.

  @param  Index                Register index of MSR.

  @return The current 64-bit contents of the MSR.

**/
UINT64
EFIAPI
AsmReadMsr64 (
  IN UINT32  Index
  )
{
  UINT64  Value;

  Value = __readmsr (Index);
  return Value;
}
Esempio n. 22
0
/*
 * Walk the per-processor timer table (x64) and report every valid DPC timer
 * into DpcTimerInfor.  ulRetCnt is set to the total number found; at most
 * ulCnt entries are copied into DpcTimer[].
 *
 * BUGFIXES:
 *  - the function is declared NTSTATUS but had no return statement (UB);
 *  - the validity check tested CurrentEntry twice instead of validating
 *    NextEntry, and CurrentEntry->Blink was dereferenced before the check;
 *  - KeSetSystemAffinityThread takes an affinity MASK, not an index, so
 *    j+1 only selected the right CPU for j == 0; use 1 << j.
 *
 * NOTE(review): the KPRCB/timer-table offsets (0x20, 0x2200+0x200) are
 * hard-coded and OS-version dependent -- confirm for the target build.
 */
NTSTATUS GetDpcTimerInformation_x64(PDPC_TIMER_INFOR DpcTimerInfor)
{
	ULONG CPUNumber = KeNumberProcessors;   /* system global */
	PUCHAR CurrentKPRCBAddress = NULL;            
	PUCHAR CurrentTimerTableEntry = NULL;
	PLIST_ENTRY CurrentEntry = NULL;
	PLIST_ENTRY NextEntry = NULL;
	PULONG64    KiWaitAlways = NULL;
	PULONG64    KiWaitNever  = NULL;
	int i = 0;
	int j = 0;
	int n = 0;
	PKTIMER Timer;
	/* Layout of nt!_KTIMER_TABLE_ENTRY on x64 */
	typedef struct _KTIMER_TABLE_ENTRY
	{
		ULONG64			Lock;
		LIST_ENTRY		Entry;
		ULARGE_INTEGER	Time;
	} KTIMER_TABLE_ENTRY, *PKTIMER_TABLE_ENTRY;

	for(j=0; j<CPUNumber; j++)
	{
		/* Run on processor j so GS base points at its KPCR */
		KeSetSystemAffinityThread((KAFFINITY)1 << j);
		CurrentKPRCBAddress=(PUCHAR)__readmsr(0xC0000101) + 0x20;  /* IA32_GS_BASE -> KPCR, +0x20 = Prcb */
		KeRevertToUserAffinityThread();   /* restore the thread's affinity */
		
		CurrentTimerTableEntry=(PUCHAR)(*(ULONG64*)CurrentKPRCBAddress + 0x2200 + 0x200);
		FindKiWaitFunc(&KiWaitNever,&KiWaitAlways);  /* locate KiWaitNever/KiWaitAlways keys */
		for(i=0; i<0x100; i++)
		{
			CurrentEntry = (PLIST_ENTRY)(CurrentTimerTableEntry + sizeof(KTIMER_TABLE_ENTRY) * i + 8);
			if( !MmIsAddressValid(CurrentEntry) )
				continue;
			NextEntry = CurrentEntry->Blink;
			if( !MmIsAddressValid(NextEntry) )
				continue;
			while( NextEntry != CurrentEntry )
			{
				PKDPC RealDpc;
				/* Recover the KTIMER from its list entry */
				Timer = CONTAINING_RECORD(NextEntry,KTIMER,TimerListEntry);
				/* Dpc pointer is encoded; decode with the wait keys */
				RealDpc=TransTimerDpcEx(Timer,*KiWaitNever,*KiWaitAlways);
				if( MmIsAddressValid(Timer)&&MmIsAddressValid(RealDpc)&&MmIsAddressValid(RealDpc->DeferredRoutine))
				{				
					/* TODO(review): guard compares ulCnt against ulRetCnt
					   (total found) while n indexes DpcTimer[] -- verify
					   intended capacity check. */
					if (DpcTimerInfor->ulCnt > DpcTimerInfor->ulRetCnt)
					{
						DpcTimerInfor->DpcTimer[n].Dpc = (ULONG64)RealDpc;
						DpcTimerInfor->DpcTimer[n].Period = Timer->Period;
						DpcTimerInfor->DpcTimer[n].TimeDispatch = (ULONG64)RealDpc->DeferredRoutine;
						DpcTimerInfor->DpcTimer[n].TimerObject = (ULONG64)Timer;
						n++;
					}					
					DpcTimerInfor->ulRetCnt++;					
				}
				NextEntry = NextEntry->Blink;
			}
		}
	}

	return STATUS_SUCCESS;
}
Esempio n. 23
0
File: vcpu.c Progetto: HideSand/ksm
static bool init_vmcs(struct vmcs *vmcs)
{
	u64 vmx = __readmsr(MSR_IA32_VMX_BASIC);
	vmcs->revision_id = (u32)vmx;

	uintptr_t pa = __pa(vmcs);
	if (__vmx_vmclear(&pa))
		return false;

	return __vmx_vmptrld(&pa) == 0;
}
Esempio n. 24
0
/*
 * Allocate a zeroed, physically contiguous VMX region (VMXON/VMCS) and
 * stamp its first dword with the VMCS revision identifier.
 *
 * If *Size is 0 the size reported by IA32_VMX_BASIC is used (falling back
 * to one page when the MSR reports 0, as some hypervisors do), rounded up
 * to a page multiple.  On success returns the virtual address, physical
 * address and final size through the out parameters.
 */
NTSTATUS AllocateVmxProcessorData(PVOID *VirtualAddress, PHYSICAL_ADDRESS *PhysicalAddress, SIZE_T *Size)
{
	if (!VirtualAddress || !PhysicalAddress || !Size)
		return STATUS_INVALID_PARAMETER;

	/* IA32_VMX_BASIC reports the required region size and revision id */
	VMX_BASIC_MSR basic;
	TO_ULL(basic) = __readmsr(MSR_IA32_VMX_BASIC);

	if (*Size <= 0)
	{
		/* In rare cases the size isn't reported (*COUGH* *VMWARE*) */
		SIZE_T regionSize = (basic.szVmxOnRegion > 0) ? basic.szVmxOnRegion : 0x1000;

		*Size = ROUND_TO_PAGES(regionSize);
	}

	/*
	 * Contiguous physical memory, anywhere in the address space, aligned
	 * to a 2MB boundary.  MmCached = may live in CPU L1/L2/L3 cache.
	 */
	PHYSICAL_ADDRESS lowest, highest, boundary;

	lowest.QuadPart   = 0;
	highest.QuadPart  = -1;
	boundary.QuadPart = 0x200000;

	PVOID region = MmAllocateContiguousMemorySpecifyCache(*Size, lowest, highest, boundary, MmCached);

	if (!region)
		return STATUS_NO_MEMORY;

	RtlSecureZeroMemory(region, *Size);

	/* The first dword of a VMX region must hold the revision id */
	*(ULONG *)region = basic.RevId;

	*VirtualAddress	 = region;
	*PhysicalAddress = MmGetPhysicalAddress(region);

	return STATUS_SUCCESS;
}
Esempio n. 25
0
/*
 * Adjust a VMX control field against its capability MSR: the MSR's high
 * dword has a 0 for every bit that must be zero, and its low dword has a
 * 1 for every bit that must be one.
 */
ULONG32 VmxAdjustControls (
                           ULONG32 Ctl,
                           ULONG32 Msr
                           )
{
    LARGE_INTEGER Capability;

    Capability.QuadPart = __readmsr (Msr);
    return (Ctl & Capability.HighPart) | Capability.LowPart;
}
Esempio n. 26
0
/// <summary>
/// Check various VMX features: ETP, VPID, VMFUNC, etc.
/// </summary>
VOID VmxCheckFeatures()
{
    IA32_VMX_BASIC_MSR basic = { 0 };
    IA32_VMX_PROCBASED_CTLS_MSR ctl = { 0 };
    IA32_VMX_PROCBASED_CTLS2_MSR ctl2 = { 0 };
    IA32_VMX_EPT_VPID_CAP_MSR vpidcap = { 0 };

    // True MSRs
    basic.All = __readmsr( MSR_IA32_VMX_BASIC );
    g_Data->Features.TrueMSRs = basic.Fields.VmxCapabilityHint;

    // Secondary control
    ctl.All = __readmsr( MSR_IA32_VMX_PROCBASED_CTLS );
    g_Data->Features.SecondaryControls = ctl.Fields.ActivateSecondaryControl;

    if (ctl.Fields.ActivateSecondaryControl)
    {
        // EPT, VPID, VMFUNC
        ctl2.All = __readmsr( MSR_IA32_VMX_PROCBASED_CTLS2 );
        g_Data->Features.EPT  = ctl2.Fields.EnableEPT;
        g_Data->Features.VPID = ctl2.Fields.EnableVPID;
        g_Data->Features.VMFUNC = ctl2.Fields.EnableVMFunctions;

        if (ctl2.Fields.EnableEPT != 0)
        {
            // Execute only
            vpidcap.All = __readmsr( MSR_IA32_VMX_EPT_VPID_CAP );
            g_Data->Features.ExecOnlyEPT = vpidcap.Fields.ExecuteOnly;
            g_Data->Features.InvSingleAddress = vpidcap.Fields.IndividualAddressInvVpid;

            if (vpidcap.Fields.ExecuteOnly == 0)
                DPRINT( "HyperBone: CPU %d: %s: No execute-only EPT translation support\n", CPU_IDX, __FUNCTION__ );
        }
        else
            DPRINT( "HyperBone: CPU %d: %s: No EPT/VPID support\n", CPU_IDX, __FUNCTION__ );
    }
    else
        DPRINT( "HyperBone: CPU %d: %s: No secondary contol support\n", CPU_IDX, __FUNCTION__ );
}
Esempio n. 27
0
// Toggle the IA32_LSTAR hook on the current processor: the first call saves
// the original SYSCALL entry point and redirects it to g_Trampoline; the
// second call restores the saved value.
_Use_decl_annotations_ EXTERN_C static NTSTATUS MsrHookCallback(void* Context) {
  UNREFERENCED_PARAMETER(Context);

  // Per-processor saved value; zero means "not hooked yet".
  auto savedMsr = &g_MSRs[KeGetCurrentProcessorNumber()];
  if (*savedMsr) {
    // Restore the original SYSCALL entry point.
    __writemsr(IA32_LSTAR, *savedMsr);
    LOG_INFO("MSR(%08x) %p => %p", IA32_LSTAR, g_Trampoline, *savedMsr);
  } else {
    // Save the current entry point, then redirect SYSCALL to the trampoline.
    *savedMsr = __readmsr(IA32_LSTAR);
    __writemsr(IA32_LSTAR, reinterpret_cast<ULONG_PTR>(g_Trampoline));
    LOG_INFO("MSR(%08x) %p => %p", IA32_LSTAR, *savedMsr, g_Trampoline);
  }
  return STATUS_SUCCESS;
}
Esempio n. 28
0
File: mu.c Progetto: B-Rich/coreboot
//----------------------------------------------------------------------------
// MemUFlushPattern:
//
//  Flush a pattern of 72 bit times (per DQ) from cache.  This procedure is used
//  to ensure cache miss on the next read training.
//
//              In: Address   - Physical address to be flushed
//                  ClCount   - number of cachelines to be flushed
//FUNC_ATTRIBUTE(noinline)
VOID
MemUFlushPattern (
  IN       UINT32 Address,
  IN       UINT16 ClCount
  )
{
  UINTN Line;

  // ssd - theory: a tlb flush is needed to avoid problems with clflush;
  // rewriting MSR 0x20F with its own value forces that flush.
  __writemsr (0x20F, __readmsr (0x20F));

  // Flush each 64-byte cacheline; the mfence prevents speculative
  // execution of the clflush.
  for (Line = 0; Line < ClCount; Line++) {
    _mm_mfence ();
    _mm_clflush_fs ((void *) (size_t) (Address + Line * 64));
  }
}
Esempio n. 29
0
/*
 * Capture the current CPU's control registers, descriptor tables, SYSENTER
 * and system MSRs, and segment state into a guest startup-state structure.
 */
void save_cpu_state(mon_guest_cpu_startup_state_t *s)
{
	ia32_gdtr_t gdtr;
	ia32_idtr_t idtr;
	ia32_selector_t sel;
	ia32_segment_descriptor_t *desc;

	s->size_of_this_struct = sizeof(mon_guest_cpu_startup_state_t);
	s->version_of_this_struct = MON_GUEST_CPU_STARTUP_STATE_VERSION;

	/* Descriptor table registers */
	__readgdtr(&gdtr);
	__sidt(&idtr);
	s->control.gdtr.base = (uint64_t)gdtr.base;
	s->control.gdtr.limit = (uint32_t)gdtr.limit;
	s->control.idtr.base = (uint64_t)idtr.base;
	s->control.idtr.limit = (uint32_t)idtr.limit;
	/* Control registers (CR2 included: guest's last faulting address) */
	s->control.cr[IA32_CTRL_CR0] = __readcr0();
	s->control.cr[IA32_CTRL_CR2] = __readcr2();
	s->control.cr[IA32_CTRL_CR3] = __readcr3();
	s->control.cr[IA32_CTRL_CR4] = __readcr4();

	/* SYSENTER and system MSRs */
	s->msr.msr_sysenter_cs = (uint32_t)__readmsr(IA32_MSR_SYSENTER_CS);
	s->msr.msr_sysenter_eip = __readmsr(IA32_MSR_SYSENTER_EIP);
	s->msr.msr_sysenter_esp = __readmsr(IA32_MSR_SYSENTER_ESP);
	s->msr.msr_efer = __readmsr(IA32_MSR_EFER);
	s->msr.msr_pat = __readmsr(IA32_MSR_PAT);
	s->msr.msr_debugctl = __readmsr(IA32_MSR_DEBUGCTL);
	s->msr.pending_exceptions = 0;
	s->msr.interruptibility_state = 0;
	s->msr.activity_state = 0;
	s->msr.smbase = 0;

	sel.sel16 = __readldtr();

	/* NOTE(review): this returns early -- skipping all segment state
	   below -- when the LDTR selector index is NONZERO; confirm the
	   condition is not inverted. */
	if (sel.bits.index != 0) {
		return;
	}

	/* Fixed attributes for an unusable LDTR and a busy 64-bit TSS */
	s->seg.segment[IA32_SEG_LDTR].attributes = 0x00010000;
	s->seg.segment[IA32_SEG_TR].attributes = 0x0000808b;
	s->seg.segment[IA32_SEG_TR].limit = 0xffffffff;
	save_segment_data((uint16_t)__readcs(), &s->seg.segment[IA32_SEG_CS]);
	save_segment_data((uint16_t)__readds(), &s->seg.segment[IA32_SEG_DS]);
	save_segment_data((uint16_t)__reades(), &s->seg.segment[IA32_SEG_ES]);
	save_segment_data((uint16_t)__readfs(), &s->seg.segment[IA32_SEG_FS]);
	save_segment_data((uint16_t)__readgs(), &s->seg.segment[IA32_SEG_GS]);
	save_segment_data((uint16_t)__readss(), &s->seg.segment[IA32_SEG_SS]);
	return;
}
Esempio n. 30
0
/*
 * @implemented
 */
/*
 * Synchronize performance-counter calibration across processors: each CPU
 * decrements the shared count and spins until all CPUs have arrived, with
 * interrupts disabled for the duration.
 *
 * BUGFIX: the interrupt state was "saved" with __readmsr()/__writemsr(Flags)
 * -- __readmsr takes a register argument and accesses an MSR, not EFLAGS.
 * Use __readeflags()/__writeeflags() around _disable() instead.
 */
VOID
NTAPI
HalCalibratePerformanceCounter(IN volatile PLONG Count,
                               IN ULONGLONG NewCount)
{
    ULONG Flags = 0;

    /* Save EFLAGS (interrupt state) and disable interrupts */
    Flags = __readeflags();
    _disable();

    /* Do a decrement for this CPU */
    _InterlockedDecrement(Count);

    /* Wait for other CPUs */
    while (*Count);

    /* Restore interrupts if they were previously enabled */
    __writeeflags(Flags);
}