void fixInterruptabilityState(void) { DWORD is=vmread(vm_guest_interruptability_state); RFLAGS guestrflags; VMEntry_interruption_information entryinterrupt; guestrflags.value=vmread(vm_guest_rflags); is=is & 0x1f; //remove reserved bits if (guestrflags.IF==0) //block by sti may not be enabled when IF is 0 is=is & 0x1e; //disable block by sti if ((is & 3)==3) { //both block by STI and block by mov ss are enabled is=is & 0x1e; //disable block by sti } entryinterrupt.interruption_information=vmread(vm_entry_interruptioninfo); if (entryinterrupt.valid) { if (entryinterrupt.type==0) //external interrupt entry must have the both sti and ss to 0 is = is & 0x1c; if (entryinterrupt.type==2) //nmi is = is & 0x1d; //disable blick by ss } vmwrite(vm_guest_interruptability_state, is); }
//-- hypervisor core -- // ULONG_PTR CHyperVisor::HVEntryPoint( __inout ULONG_PTR reg[0x10], __in VOID* param ) { (*(void (*)(ULONG_PTR*, const void*))(m_callback))(reg, param); ULONG_PTR ExitReason; vmread(VMX_VMCS32_RO_EXIT_REASON, &ExitReason); ULONG_PTR ExitInstructionLength; vmread(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &ExitInstructionLength); ULONG_PTR GuestEIP; vmread(VMX_VMCS64_GUEST_RIP, &GuestEIP); vmwrite(VMX_VMCS64_GUEST_RIP, GuestEIP + ExitInstructionLength); if((ExitReason > VMX_EXIT_VMCALL) && (ExitReason <= VMX_EXIT_VMXON)) { ULONG_PTR GuestRFLAGS; vmread(VMX_VMCS_GUEST_RFLAGS, &GuestRFLAGS); vmwrite(VMX_VMCS_GUEST_RFLAGS, GuestRFLAGS & (~0x8d5) | 0x1); } if (VMX_EXIT_CRX_MOVE == ExitReason) HandleCrxAccess(reg); return m_hvCallbacks[ExitReason]; }
/*
 * Decode and log the current VM-exit reason over the serial console.
 * NOTE(review): this definition appears truncated in this chunk — the outer
 * switch is closed but the function's closing brace (and possibly further
 * cases) lie outside the visible text; left byte-identical.
 */
void vmx_vm_exit_reason (void)
{
  /* basic exit information read from the VMCS */
  uint32 reason = vmread (VMXENC_EXIT_REASON);
  uint32 qualif = vmread (VMXENC_EXIT_QUAL);
  uint32 intinf = vmread (VMXENC_VM_EXIT_INTERRUPT_INFO);
  uint32 ercode = vmread (VMXENC_VM_EXIT_INTERRUPT_ERRCODE);
  /*******************************************************
   * uint32 inslen = vmread (VMXENC_VM_EXIT_INSTR_LEN);  *
   * uint32 insinf = vmread (VMXENC_VM_EXIT_INSTR_INFO); *
   *******************************************************/
  uint8 crnum, type, reg, vec;
  switch (reason) {
  case 0x0:
    /* Exception or NMI */
    /* bit 31 = valid bit of the interruption information field */
    if (intinf & 0x80000000) {
      char *cause;
      vec = intinf & 0xFF;           /* vector number */
      type = (intinf & 0x700) >> 8;  /* interruption type */
      switch (type) {
      case 0: cause = "external interrupt"; break;
      case 2: cause = "NMI"; break;
      case 3: cause = "hardware exception"; break;
      case 6: cause = "software exception"; break;
      default: cause = "unknown"; break;
      }
      /* bit 11 = error code valid */
      com1_printf (" EXCEPTION: vector=%.2X code=%X cause=%s\n",
                   vec, (intinf & 0x800) ? ercode : 0, cause);
      if (vec == 0xE && type == 3) {
        /* Page fault: qualification holds the faulting linear address */
        com1_printf (" Page Fault at %.8X\n", qualif);
      }
    }
    break;
  case 0x1C:
    /* Control Register access */
    crnum = qualif & 0xF;          /* which CR */
    type = (qualif & 0x30) >> 4;   /* access type */
    reg = (qualif & 0xF00) >> 8;   /* GP register involved */
    switch (type) {
    case 0:
      com1_printf (" CR WRITE: MOV %%%s, %%CR%d\n",
                   vmx_cr_access_register_names[reg], crnum);
      break;
    case 1:
      com1_printf (" CR READ: MOV %%CR%d, %%%s\n",
                   crnum, vmx_cr_access_register_names[reg]);
      break;
    case 2:
      com1_printf (" CLTS\n");
      break;
    case 3:
      com1_printf (" LMSW\n");
      break;
    }
    break;
  }
/*
 * Read the current process id from the guest (XP SP2 32-bit layout):
 * [[kernel fs:0x124]+0x1ec] = pid.
 * Returns 0 when the required guest memory cannot be resolved.
 *
 * BUGFIX: '&notpaged' had been mangled to the mojibake '¬paged'
 * (HTML entity '&not;' decoded) in both getPhysicalAddressVM calls.
 */
int getpid(pcpuinfo currentcpuinfo)
{
  //if xp sp2-32bit
  //[[kernel fs:124]+1ec]=pid
  //if (os==0)
  int notpaged=0;
  ULONG fsbase=vmread(0x680e);
  ULONG *part1=(ULONG *)(UINT64)MapPhysicalMemory(
      getPhysicalAddressVM(currentcpuinfo, fsbase+0x124, &notpaged),
      currentcpuinfo->AvailableVirtualAddress);
  ULONG *part2;

  // NOTE(review): proceeding only when 'notpaged' is nonzero after the first
  // lookup looks inverted relative to the second check — confirm the flag's
  // polarity against getPhysicalAddressVM before changing.
  if (notpaged)
  {
    part2=(ULONG *)(UINT64)MapPhysicalMemory(
        getPhysicalAddressVM(currentcpuinfo, *part1+0x1ec, &notpaged),
        currentcpuinfo->AvailableVirtualAddress);

    if (notpaged)
      return 0;   // pid field not present in guest physical memory
    else
      return *part2;
  }
  else
    return 0;
}
/*
 * Arm ultimap on this cpu: save the guest's current DEBUGCTL/DS_AREA so they
 * can be restored later, then record the target CR3 and the values to apply.
 */
void ultimap_setup(pcpuinfo currentcpuinfo, QWORD CR3, QWORD DEBUGCTL, QWORD DS_AREA)
{
  // snapshot the guest's current values first so they can be restored
  currentcpuinfo->Ultimap.OriginalDebugCTL = vmread(vm_guest_IA32_DEBUGCTL);
  currentcpuinfo->Ultimap.OriginalDS_AREA = readMSR(IA32_DS_AREA);

  // record what to apply when the watched process is scheduled in
  currentcpuinfo->Ultimap.CR3 = CR3;
  currentcpuinfo->Ultimap.DEBUGCTL = DEBUGCTL;
  currentcpuinfo->Ultimap.DS_AREA = DS_AREA;
  currentcpuinfo->Ultimap.Active = 1;
}
/**
 * Handle a control-register-access VM exit. Only MOV to/from CR3 with a
 * register operand is emulated: reads return the guest CR3 shadow, writes
 * update it.
 *
 * BUGFIX: '&reg[r64]' had been mangled to the mojibake '®[r64]' (HTML
 * entity '&reg;' decoded); the function's closing brace was also missing.
 *
 * @param reg guest GP-register context (16 slots), indexed by the
 *            register number from the exit qualification
 */
void CHyperVisor::HandleCrxAccess(
    __inout ULONG_PTR reg[0x10]
    )
{
    ULONG_PTR ExitQualification;
    vmread(VMX_VMCS_RO_EXIT_QUALIFICATION, &ExitQualification);

    ULONG_PTR cr = (ExitQualification & 0x0000000F);        // which CR
    ULONG_PTR operand = (ExitQualification & 0x00000040) >> 6; // 0 = register operand

    if (3 == cr && 0 == operand)
    {
        ULONG_PTR acess = (ExitQualification & 0x00000030) >> 4; // 0 = MOV to CR, 1 = MOV from CR
        ULONG_PTR r64 = (ExitQualification & 0x00000F00) >> 8;   // GP register index

        if (1 == acess)
            vmread(VMX_VMCS64_GUEST_CR3, &reg[r64]);
        else if (0 == acess)
            vmwrite(VMX_VMCS64_GUEST_CR3, reg[r64]);
    }
}
/// Verifies the relationship between NMI exiting and NMI-window exiting.
/// Fails when both the pin-based "NMI exiting" and the primary
/// processor-based "NMI-window exiting" controls are set.
///
/// BUGFIX: the failure message was copy-pasted from
/// check_control_nmi_exiting_and_virtual_nmi and named the wrong check
/// with the wrong explanation.
bool
vmcs_intel_x64::check_control_virtual_nmi_and_nmi_window()
{
    auto controls1 = vmread(VMCS_PIN_BASED_VM_EXECUTION_CONTROLS);
    auto controls2 = vmread(VMCS_PRIMARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((controls1 & VM_EXEC_PIN_BASED_NMI_EXITING) != 0 &&
        (controls2 & VM_EXEC_P_PROC_BASED_NMI_WINDOW_EXITING) != 0)
    {
        std::cout << "check_control_virtual_nmi_and_nmi_window failed: "
                  << "if virtual nmis is 0, nmi window exiting must be 0"
                  << std::hex
                  << " - pin controls: 0x" << controls1 << std::endl
                  << " - proc controls: 0x" << controls2 << std::endl
                  << std::dec;
        return false;
    }

    return true;
}
/*
 * Log diagnostic information after a failed VM entry and hand the VMX error
 * back to the caller. When the failure is VMX_FAIL_VALID the VMCS
 * instruction-error code is available and logged as well.
 */
static vmx_error_t cpu_vmentry_failed(struct vcpu_t *vcpu, vmx_error_t err)
{
    hax_debug("HAX: VM entry failed: err=%lx RIP=%08lx\n",
              err, (mword)vmread(vcpu, GUEST_RIP));

    //dump_vmcs();

    if (err != VMX_FAIL_VALID) {
        // invalid-VMCS failures carry no error code
        hax_log("HAX: Prev exit: %llx no error code\n",
                vmread(vcpu, VM_EXIT_INFO_REASON));
    } else {
        hax_log("HAX: Prev exit: %llx error code: %llx\n",
                vmread(vcpu, VM_EXIT_INFO_REASON),
                vmread(vcpu, VMX_INSTRUCTION_ERROR_CODE));
    }

    hax_log("HAX: VM entry failed\n");
    hax_log("end of cpu_vmentry_failed\n");
    return err;
}
/// Verifies that the VMX-preemption timer value is only saved on exit when
/// the pin-based "activate VMX-preemption timer" control is also enabled.
bool
vmcs_intel_x64::check_control_activate_and_save_premeption_timer_must_be_0()
{
    auto pin_ctls = vmread(VMCS_PIN_BASED_VM_EXECUTION_CONTROLS);
    auto exit_ctls = vmread(VMCS_VM_EXIT_CONTROLS);

    auto timer_active = (pin_ctls & VM_EXEC_PIN_BASED_ACTIVATE_VMX_PREEMPTION_TIMER) != 0;
    auto timer_saved = (exit_ctls & VM_EXIT_CONTROL_SAVE_VMX_PREEMPTION_TIMER_VALUE) != 0;

    if (!(timer_active && timer_saved))
        return true;

    std::cout << "check_control_activate_and_save_premeption_timer_must_be_0 failed: "
              << "if activate preempt timer is 0, save preempt timer must also be 0"
              << std::hex
              << " - pin controls: 0x" << pin_ctls << std::endl
              << " - exit controls: 0x" << exit_ctls << std::endl
              << std::dec;
    return false;
}
void ultimap_handleCR3Change(pcpuinfo currentcpuinfo, QWORD oldcr3, QWORD newcr3)
/*
 * Called when cr3 changes and ultimap is active.
 * When the watched process is scheduled in, applies the ultimap
 * DEBUGCTL/DS_AREA values and registers MSR-exit intercepts for them;
 * when it is scheduled out, restores the originals and unregisters the
 * intercepts.
 */
{
  currentcpuinfo->Ultimap.CR3_switchcount++;
  if (oldcr3 != newcr3)
  {
    if (currentcpuinfo->Ultimap.CR3==newcr3) //if the new cr3 is the process to watch
    {
      currentcpuinfo->Ultimap.CR3_switchcount2++;
      currentcpuinfo->Ultimap.LastOldCR3=oldcr3;
      currentcpuinfo->Ultimap.LastNewCR3=newcr3;

      //set the MSR values
      // save the outgoing values so the switch-out path can restore them
      currentcpuinfo->Ultimap.OriginalDebugCTL=vmread(vm_guest_IA32_DEBUGCTL);
      currentcpuinfo->Ultimap.OriginalDS_AREA=readMSR(IA32_DS_AREA);

      vmwrite(vm_guest_IA32_DEBUGCTL, currentcpuinfo->Ultimap.DEBUGCTL);
      writeMSR(IA32_DS_AREA, currentcpuinfo->Ultimap.DS_AREA);

      //and register a vm-exit event on MSR read/write for DEBUGCTL and DS_AREA
      // first 1024 bytes = read bitmap, next 1024 = write bitmap
      MSRBitmap[IA32_DS_AREA/8]|=1 << (IA32_DS_AREA % 8);
      MSRBitmap[1024+IA32_DS_AREA/8]|=1 << (IA32_DS_AREA % 8);
      MSRBitmap[IA32_DEBUGCTL_MSR/8]|=1 << (IA32_DEBUGCTL_MSR % 8);
      MSRBitmap[1024+IA32_DEBUGCTL_MSR/8]|=1 << (IA32_DEBUGCTL_MSR % 8);
    }
    else
    if (currentcpuinfo->Ultimap.CR3==currentcpuinfo->guestCR3) //if the old cr3 is the process to watch and is switched out to a different one
    {
      // NOTE(review): this compares against guestCR3 rather than the oldcr3
      // parameter — presumably guestCR3 still holds the pre-switch value at
      // this point; confirm against the caller before changing.

      //unset the MSR values
      vmwrite(vm_guest_IA32_DEBUGCTL, currentcpuinfo->Ultimap.OriginalDebugCTL);
      writeMSR(IA32_DS_AREA, currentcpuinfo->Ultimap.OriginalDS_AREA);

      //and unregister the vm-exit event on MSR read/write for DEBUGCTL and DS_AREA
      MSRBitmap[IA32_DS_AREA/8]&=~(1 << (IA32_DS_AREA % 8));
      MSRBitmap[1024+IA32_DS_AREA/8]&=~(1 << (IA32_DS_AREA % 8));
      MSRBitmap[IA32_DEBUGCTL_MSR/8]&=~(1 << (IA32_DEBUGCTL_MSR % 8));
      MSRBitmap[1024+IA32_DEBUGCTL_MSR/8]&=~(1 << (IA32_DEBUGCTL_MSR % 8));
    }
  }
}
/*
 * Called when an int1 happens.
 * Clears the LBR flag from the saved DEBUGCTL, then rewrites the guest
 * DEBUGCTL: the ultimap value when the watched process is running,
 * otherwise the current guest value with the LBR bit stripped.
 */
void ultimap_handleDB(pcpuinfo currentcpuinfo)
{
  // disable the LBR flag on bp
  currentcpuinfo->Ultimap.OriginalDebugCTL &= ~1;

  if (currentcpuinfo->guestCR3 == currentcpuinfo->Ultimap.CR3)
    vmwrite(vm_guest_IA32_DEBUGCTL, currentcpuinfo->Ultimap.DEBUGCTL); //just write it again
  else
    vmwrite(vm_guest_IA32_DEBUGCTL, vmread(vm_guest_IA32_DEBUGCTL) & ~1);
}
/*
 * Publish VMCS state to a thread leaving a VM asynchronously: guest RIP and
 * the primary processor controls go into message registers, the entry
 * interruption info into the IPC buffer. No-op without CONFIG_VTX or when
 * the thread has no VCPU.
 */
void Arch_leaveVMAsyncTransfer(tcb_t *tcb)
{
#ifdef CONFIG_VTX
    vcpu_t *vcpu = tcb->tcbArch.vcpu;
    word_t *buffer;

    if (!vcpu) {
        return;
    }

    // make sure this vcpu's VMCS is the one currently loaded
    if (current_vmcs != vcpu) {
        vmptrld(vcpu);
    }

    setRegister(tcb, msgRegisters[0], vmread(VMX_GUEST_RIP));
    setRegister(tcb, msgRegisters[1], vmread(VMX_CONTROL_PRIMARY_PROCESSOR_CONTROLS));

    buffer = lookupIPCBuffer(true, tcb);
    if (!buffer) {
        return; // no IPC buffer mapped; skip the extra field
    }
    buffer[3] = vmread(VMX_CONTROL_ENTRY_INTERRUPTION_INFO);
#endif
}
/*
 * Refresh the per-vcpu cached copies of the VMCS control fields from the
 * currently loaded VMCS, then (re)write the fields that depend on them
 * (I/O and MSR bitmaps, TSC offset, activity state) and flush all pending
 * vmwrites.
 */
void load_vmcs_common(struct vcpu_t *vcpu)
{
    // Update the cache for the PIN/EXIT ctls
    // each vmx(...) below is a macro lvalue; the chained assignment stores
    // the vmread result in both the working copy and the *_base snapshot
    vmx(vcpu, pin_ctls) = vmx(vcpu, pin_ctls_base) = vmread(
            vcpu, VMX_PIN_CONTROLS);
    vmx(vcpu, pcpu_ctls) = vmx(vcpu, pcpu_ctls_base) = vmread(
            vcpu, VMX_PRIMARY_PROCESSOR_CONTROLS);
    // secondary controls only exist when the primary controls enable them
    vmx(vcpu, scpu_ctls) = vmx(vcpu, scpu_ctls_base) =
            vmx(vcpu, pcpu_ctls) & SECONDARY_CONTROLS ?
            vmread(vcpu, VMX_SECONDARY_PROCESSOR_CONTROLS) : 0;
    vmx(vcpu, exc_bitmap) = vmx(vcpu, exc_bitmap_base) = vmread(
            vcpu, VMX_EXCEPTION_BITMAP);
    vmx(vcpu, entry_ctls) = vmx(vcpu, entry_ctls_base) = vmread(
            vcpu, VMX_ENTRY_CONTROLS);
    vmx(vcpu, exit_ctls) = vmx(vcpu, exit_ctls_base) = vmread(
            vcpu, VMX_EXIT_CONTROLS);

    if (vmx(vcpu, pcpu_ctls) & IO_BITMAP_ACTIVE) {
        vmwrite(vcpu, VMX_IO_BITMAP_A, hax_page_pa(io_bitmap_page_a));
        vmwrite(vcpu, VMX_IO_BITMAP_B, hax_page_pa(io_bitmap_page_b));
    }

    if (vmx(vcpu, pcpu_ctls) & MSR_BITMAP_ACTIVE)
        vmwrite(vcpu, VMX_MSR_BITMAP, hax_page_pa(msr_bitmap_page));

    if (vmx(vcpu, pcpu_ctls) & USE_TSC_OFFSETTING)
        vmwrite(vcpu, VMX_TSC_OFFSET, vcpu->tsc_offset);

    vmwrite(vcpu, GUEST_ACTIVITY_STATE, vcpu->state->_activity_state);
    // flush every cached guest-state field into the VMCS
    vcpu_vmwrite_all(vcpu, 0);
}
/**
 * RDMSR-exit handler that protects the SYSENTER_EIP hook: when the guest
 * reads IA64_SYSENTER_EIP, return the original (unhooked) syscall address
 * and divert the guest to the rdmsr trampoline; any other MSR is read
 * through. The EDX:EAX result is written back into the register context.
 *
 * BUGFIX: '&reg[RCX]' had been mangled to the mojibake '®[RCX]'
 * (HTML entity '&reg;' decoded).
 *
 * @param reg guest GP-register context (16 slots); RCX holds the MSR index
 */
void CSysCall::HookProtectionMSR(
    __inout ULONG_PTR reg[0x10]
    )
{
    ULONG_PTR syscall;
    if (IA64_SYSENTER_EIP == reg[RCX])
    {
        // hand back the genuine syscall entry instead of the hook
        syscall = (ULONG_PTR)CSysCall::GetSysCall(CVirtualizedCpu::GetCoreId(reg));

        ULONG_PTR ins_len;
        vmread(VMX_VMCS32_RO_EXIT_INSTR_LENGTH, &ins_len);
        vmread(VMX_VMCS64_GUEST_RIP, &reg[RCX]); //original 'ret'-addr

        // remember where the rdmsr lives so the hook can resume there
        m_sRdmsrRips.Push(reg[RCX] - ins_len);
        vmwrite(VMX_VMCS64_GUEST_RIP, rdmsr_hook); //rdmsr_hook is trampoline to RdmsrHook
    }
    else
    {
        syscall = rdmsr((ULONG)reg[RCX]);
    }

    // MSR result goes back to the guest in EDX:EAX
    reg[RAX] = (ULONG)(syscall);
    reg[RDX] = (ULONG)(syscall >> (sizeof(ULONG) << 3));
}
static void vmread_cr(struct vcpu_t *vcpu) { struct vcpu_state_t *state = vcpu->state; mword cr4, cr4_mask; // Update only the bits the guest is allowed to change // This must use the actual cr0 mask, not _cr0_mask. mword cr0 = vmread(vcpu, GUEST_CR0); mword cr0_mask = vmread(vcpu, VMX_CR0_MASK); // should cache this hax_debug("vmread_cr cr0 %lx, cr0_mask %lx, state->_cr0 %llx\n", cr0, cr0_mask, state->_cr0); state->_cr0 = (cr0 & ~cr0_mask) | (state->_cr0 & cr0_mask); hax_debug("vmread_cr, state->_cr0 %llx\n", state->_cr0); // update CR3 only if guest is allowed to change it if (!(vmx(vcpu, pcpu_ctls) & CR3_LOAD_EXITING)) state->_cr3 = vmread(vcpu, GUEST_CR3); cr4 = vmread(vcpu, GUEST_CR4); cr4_mask = vmread(vcpu, VMX_CR4_MASK); // should cache this state->_cr4 = (cr4 & ~cr4_mask) | (state->_cr4 & cr4_mask); }
//------------------------------------------------------------------------ // process all imports of a pe file // returns: -1:could not read an impdir; 0-ok; // other values can be returned by the visitor inline int pe_loader_t::process_imports(linput_t *li, pe_import_visitor_t &piv) { if ( pe.impdir.rva == 0 ) return 0; if ( transvec.empty() ) process_sections(li); int code = 0; bool is_memory_linput = get_linput_type(li) == LINPUT_PROCMEM; for ( int ni=0; ; ni++ ) { off_t off = pe.impdir.rva + ni*sizeof(peimpdir_t); peimpdir_t &id = piv.id; if ( !vmread(li, off, &id, sizeof(id)) ) { int code = piv.impdesc_error(off); if ( code != 0 ) break; // we continue if the import descriptor is within the page belonging // to the program if ( !is_memory_linput ) { uint32 fsize = pe.align_up_in_file(qlsize(li)); if ( map_ea(off)+sizeof(id) > fsize ) return -1; } } if ( id.dllname == 0 && id.table1 == 0 ) break; ea_t ltable = id.table1; //OriginalFirstThunk ea_t atable = id.looktab; //FirstThunk bool ok = true; char dll[MAXSTR]; asciiz(li, id.dllname, dll, sizeof(dll), &ok); if ( !ok || dll[0] == '\0' ) break; ansi2idb(dll); if ( !is_memory_linput && (map_ea(ltable) == BADADDR || ltable < pe.hdrsize) ) ltable = atable; atable += get_imagebase(); int code = piv.visit_module(dll, atable, ltable); if ( code != 0 ) break; code = process_import_table(li, pe, atable, ltable, piv); if ( code != 0 ) break; } return code; }
/// Placeholder check: the TPR-shadow / virtual-APIC consistency rules are
/// not implemented yet, so any VMCS that enables the TPR shadow fails.
bool
vmcs_intel_x64::check_control_tpr_shadow_and_virtual_apic()
{
    auto proc_ctls = vmread(VMCS_PRIMARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((proc_ctls & VM_EXEC_P_PROC_BASED_USE_TPR_SHADOW) == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_tpr_shadow_and_virtual_apic"
              << std::endl;
    return false;
}
/// Placeholder check: virtual-interrupt-delivery consistency rules are not
/// implemented yet, so any VMCS enabling the feature fails.
bool
vmcs_intel_x64::check_control_virtual_interrupt_and_external_interrupt()
{
    auto sec_ctls = vmread(VMCS_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((sec_ctls & VM_EXEC_S_PROC_BASED_VIRTUAL_INTERRUPT_DELIVERY) == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_virtual_interrupt_and_external_interrupt"
              << std::endl;
    return false;
}
/// Placeholder check: x2APIC-mode virtualization rules are not implemented
/// yet, so any VMCS enabling the feature fails.
bool
vmcs_intel_x64::check_control_x2apic_mode_and_virtual_apic_access()
{
    auto sec_ctls = vmread(VMCS_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((sec_ctls & VM_EXEC_S_PROC_BASED_VIRTUALIZE_X2APIC_MODE) == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_x2apic_mode_and_virtual_apic_access"
              << std::endl;
    return false;
}
/// Placeholder check: APIC-register virtualization rules are not implemented
/// yet, so any VMCS enabling the feature fails.
bool
vmcs_intel_x64::check_control_register_apic_mode_and_tpr()
{
    auto sec_ctls = vmread(VMCS_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((sec_ctls & VM_EXEC_S_PROC_BASED_APIC_REGISTER_VIRTUALIZATION) == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_register_apic_mode_and_tpr"
              << std::endl;
    return false;
}
/// Placeholder check: VPID consistency rules are not implemented yet, so
/// any VMCS enabling VPID fails.
bool
vmcs_intel_x64::check_control_vpid_checks()
{
    auto sec_ctls = vmread(VMCS_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((sec_ctls & VM_EXEC_S_PROC_BASED_ENABLE_VPID) == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_vpid_checks"
              << std::endl;
    return false;
}
/// Placeholder check: MSR-bitmap address validation is not implemented yet,
/// so any VMCS using MSR bitmaps fails.
bool
vmcs_intel_x64::check_control_msr_bitmap_address_bits()
{
    auto proc_ctls = vmread(VMCS_PRIMARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS);

    if ((proc_ctls & VM_EXEC_P_PROC_BASED_USE_MSR_BITMAPS) == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_msr_bitmap_address_bits"
              << std::endl;
    return false;
}
bool vmcs_intel_x64::check_control_enable_vmcs_shadowing() { auto controls = vmread(VMCS_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS); // There are multiple checks that are missing here if ((controls & VM_EXEC_S_PROC_BASED_VMCS_SHADOWING) != 0) { std::cout << "unimplemented VMCS check: " << "check_control_enable_vmcs_shadowing" << std::endl; return false; } return true; }
bool vmcs_intel_x64::check_control_enable_ept_violation_checks() { auto controls = vmread(VMCS_SECONDARY_PROCESSOR_BASED_VM_EXECUTION_CONTROLS); // There are multiple checks that are missing here if ((controls & VM_EXEC_S_PROC_BASED_EPT_VIOLATION_VE) != 0) { std::cout << "unimplemented VMCS check: " << "check_control_enable_ept_violation_checks" << std::endl; return false; } return true; }
/// Placeholder check: event-injection validation is not implemented yet, so
/// any non-zero entry interruption-information field fails.
bool
vmcs_intel_x64::check_control_event_injection_checks()
{
    auto entry_info = vmread(VMCS_VM_ENTRY_INTERRUPTION_INFORMATION_FIELD);

    // There are multiple checks that are missing here
    if (entry_info == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_event_injection_checks"
              << std::endl;
    return false;
}
/// Placeholder check: exit MSR-store address validation is not implemented
/// yet, so any non-zero MSR-store count fails.
///
/// BUGFIX: the diagnostic named "check_control_msr_store_address", which is
/// not this function; corrected so failures are attributable.
bool
vmcs_intel_x64::check_control_exit_msr_store_address()
{
    auto msr_store_count = vmread(VMCS_VM_EXIT_MSR_STORE_COUNT);

    // There are multiple checks that are missing here
    if (msr_store_count != 0)
    {
        std::cout << "unimplemented VMCS check: "
                  << "check_control_exit_msr_store_address"
                  << std::endl;
        return false;
    }

    return true;
}
bool vmcs_intel_x64::check_control_process_posted_interrupt_checks() { auto controls = vmread(VM_EXEC_PIN_BASED_NMI_EXITING); // There are multiple checks that are missing here if ((controls & VM_EXEC_PIN_BASED_PROCESS_POSTED_INTERRUPTS) != 0) { std::cout << "unimplemented VMCS check: " << "check_control_process_posted_interrupt_checks" << std::endl; return false; } return true; }
/// Placeholder check: entry MSR-load address validation is not implemented
/// yet, so any non-zero MSR-load count fails.
bool
vmcs_intel_x64::check_control_entry_msr_load_address()
{
    auto msr_load_count = vmread(VMCS_VM_ENTRY_MSR_LOAD_COUNT);

    // There are multiple checks that are missing here
    if (msr_load_count == 0)
        return true;

    std::cout << "unimplemented VMCS check: "
              << "check_control_entry_msr_load_address"
              << std::endl;
    return false;
}
/// Verifies the CR3-target count does not exceed 4, the architectural
/// maximum number of CR3-target values.
bool
vmcs_intel_x64::check_control_cr3_count_less_then_4()
{
    auto count = vmread(VMCS_CR3_TARGET_COUNT);

    if (count <= 4)
        return true;

    std::cout << "check_control_cr3_count_less_then_4 failed: "
              << "cr3 count must be between 0 - 4"
              << std::hex
              << " - cr3_target_count: 0x" << count << std::endl
              << std::dec;
    return false;
}
/// Verifies the relationship between the pin-based "NMI exiting" and
/// "virtual NMIs" controls; fails when both are set.
bool
vmcs_intel_x64::check_control_nmi_exiting_and_virtual_nmi()
{
    auto pin_ctls = vmread(VMCS_PIN_BASED_VM_EXECUTION_CONTROLS);

    auto nmi_exiting = (pin_ctls & VM_EXEC_PIN_BASED_NMI_EXITING) != 0;
    auto virtual_nmis = (pin_ctls & VM_EXEC_PIN_BASED_VIRTUAL_NMIS) != 0;

    if (!(nmi_exiting && virtual_nmis))
        return true;

    std::cout << "check_control_nmi_exiting_and_virtual_nmi failed: "
              << "if nmi exiting is 0, virtual nmi must be 0"
              << std::hex
              << " - controls: 0x" << pin_ctls << std::endl
              << std::dec;
    return false;
}