/*
 * vmport "get RAM size" command handler.
 * Writes the constant 0x1177 into the guest's EBX and returns the
 * machine RAM size (delivered to the guest via EAX by the caller).
 */
static uint32_t vmport_cmd_ram_size(void *opaque, uint32_t addr)
{
    CPUX86State *env = &X86_CPU(current_cpu)->env;

    env->regs[R_EBX] = 0x1177;
    return ram_size;
}
/*
 * QMP "inject-nmi" command handler.
 *
 * TARGET_I386: delivers an NMI to every vCPU — through the local APIC
 * model when one is present, otherwise directly via cpu_interrupt().
 * TARGET_S390X: delivers a RESTART to the monitor's current CPU only.
 * All other targets: reports QERR_UNSUPPORTED.
 */
void qmp_inject_nmi(Error **errp)
{
#if defined(TARGET_I386)
    CPUState *cs;

    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (!env->apic_state) {
            /* No APIC device model: raise the NMI on the core itself. */
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(env->apic_state);
        }
    }
#elif defined(TARGET_S390X)
    CPUState *cs;
    S390CPU *cpu;

    for (cs = first_cpu; cs != NULL; cs = cs->next_cpu) {
        cpu = S390_CPU(cs);
        /* Only the CPU currently selected in the monitor is restarted. */
        if (cpu->env.cpu_num == monitor_get_cpu_index()) {
            if (s390_cpu_restart(S390_CPU(cs)) == -1) {
                error_set(errp, QERR_UNSUPPORTED);
                return;
            }
            break;
        }
    }
#else
    error_set(errp, QERR_UNSUPPORTED);
#endif
}
/*
 * Deliver an NMI for the CPU with the given index by walking the QOM
 * tree for devices implementing the NMI interface (nmi_children()).
 * Sets QERR_UNSUPPORTED when no device handled the request, otherwise
 * propagates whatever error the handler produced (possibly none).
 */
void nmi_monitor_handle(int cpu_index, Error **errp)
{
    struct do_nmi_s ns = {
        .cpu_index = cpu_index,
        .err = NULL,
        .handled = false
    };

    nmi_children(object_get_root(), &ns);
    if (ns.handled) {
        error_propagate(errp, ns.err);
    } else {
        error_setg(errp, QERR_UNSUPPORTED);
    }
}

/*
 * Inject an NMI into the guest.
 * On TARGET_I386 this broadcasts to every vCPU (via APIC when present);
 * on other targets it falls back to nmi_monitor_handle() for CPU 0,
 * ignoring any error.
 */
void inject_nmi(void)
{
#if defined(TARGET_I386)
    CPUState *cs;

    CPU_FOREACH(cs) {
        X86CPU *cpu = X86_CPU(cs);

        if (!cpu->apic_state) {
            /* No APIC device model: raise the NMI on the core itself. */
            cpu_interrupt(cs, CPU_INTERRUPT_NMI);
        } else {
            apic_deliver_nmi(cpu->apic_state);
        }
    }
#else
    nmi_monitor_handle(0, NULL);
#endif
}
/*
 * Load CPU state from a 32-bit TSS image during a HVF task switch.
 * Writes CR3 straight into the VMCS, restores EIP/EFLAGS and the
 * general purpose registers into the emulated env, and reloads all
 * segment selectors (LDT first) through the VMCS.
 */
static void load_state_from_tss32(CPUState *cpu, struct x86_tss_segment32 *tss)
{
    X86CPU *x86_cpu = X86_CPU(cpu);
    CPUX86State *env = &x86_cpu->env;

    wvmcs(cpu->hvf_fd, VMCS_GUEST_CR3, tss->cr3);

    RIP(env) = tss->eip;
    /* Bit 1 of EFLAGS is architecturally always set. */
    EFLAGS(env) = tss->eflags | 2;

    /* General purpose registers */
    RAX(env) = tss->eax;
    RCX(env) = tss->ecx;
    RDX(env) = tss->edx;
    RBX(env) = tss->ebx;
    RSP(env) = tss->esp;
    RBP(env) = tss->ebp;
    RSI(env) = tss->esi;
    RDI(env) = tss->edi;

    /* LDT must be loaded before the other selectors are resolved. */
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ldt}}, R_LDTR);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->es}}, R_ES);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->cs}}, R_CS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ss}}, R_SS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->ds}}, R_DS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->fs}}, R_FS);
    vmx_write_segment_selector(cpu, (x68_segment_selector){{tss->gs}}, R_GS);
}
/*
 * vmport write handler: a guest write is treated exactly like a read —
 * the command is dispatched and its result is placed in the guest's EAX.
 * The written value itself is ignored.
 */
static void vmport_ioport_write(void *opaque, hwaddr addr,
                                uint64_t val, unsigned size)
{
    CPUX86State *env = &X86_CPU(current_cpu)->env;

    env->regs[R_EAX] = vmport_ioport_read(opaque, addr, 4);
}
/*
 * vmport "get version" command handler.
 * Places VMPORT_MAGIC in the guest's EBX and returns the backdoor
 * protocol version number (6) for EAX.
 */
static uint32_t vmport_cmd_get_version(void *opaque, uint32_t addr)
{
    CPUX86State *env = &X86_CPU(current_cpu)->env;

    env->regs[R_EBX] = VMPORT_MAGIC;
    return 6;
}
/*
 * vmport read handler: dispatch a VMware backdoor command.
 *
 * The guest places VMPORT_MAGIC in EAX and a command number in ECX,
 * then reads the port. An unknown or unregistered command (or a wrong
 * magic) simply echoes EAX back to the guest.
 */
static uint64_t vmport_ioport_read(void *opaque, hwaddr addr, unsigned size)
{
    VMPortState *s = opaque;
    CPUState *cs = current_cpu;
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    unsigned char command;
    uint32_t eax;

    /* Registers may be stale (e.g. under KVM); sync before reading. */
    cpu_synchronize_state(cs);

    eax = env->regs[R_EAX];
    if (eax != VMPORT_MAGIC) {
        return eax;
    }

    /* Only the low byte of ECX selects the command (deliberate truncation). */
    command = env->regs[R_ECX];
    trace_vmport_command(command);
    if (command >= VMPORT_ENTRIES || !s->func[command]) {
        qemu_log_mask(LOG_UNIMP, "vmport: unknown command %x\n", command);
        return eax;
    }

    return s->func[command](s->opaque[command], addr);
}
void vmmouse_set_data(const uint32_t *data) { X86CPU *cpu = X86_CPU(current_cpu); CPUX86State *env = &cpu->env; env->regs[R_EAX] = data[0]; env->regs[R_EBX] = data[1]; env->regs[R_ECX] = data[2]; env->regs[R_EDX] = data[3]; env->regs[R_ESI] = data[4]; env->regs[R_EDI] = data[5]; }
/* vmmouse helpers */

/*
 * Capture six guest registers into a vmmouse argument array, in the
 * fixed order EAX, EBX, ECX, EDX, ESI, EDI.
 */
void vmmouse_get_data(uint32_t *data)
{
    static const int reg_order[] = {
        R_EAX, R_EBX, R_ECX, R_EDX, R_ESI, R_EDI
    };
    CPUX86State *env = &X86_CPU(current_cpu)->env;
    int i;

    for (i = 0; i < 6; i++) {
        data[i] = env->regs[reg_order[i]];
    }
}
/*
 * QMP "query-cpus" command handler.
 *
 * Builds a CpuInfoList entry per vCPU with index, current flag, halted
 * state and host thread id, plus a per-target program counter field
 * (pc/nip/pc+npc/PC depending on TARGET_*). Caller owns the list.
 */
CpuInfoList *qmp_query_cpus(Error **errp)
{
    CpuInfoList *head = NULL, *cur_item = NULL;
    CPUState *cpu;

    for (cpu = first_cpu; cpu != NULL; cpu = cpu->next_cpu) {
        CpuInfoList *info;
#if defined(TARGET_I386)
        X86CPU *x86_cpu = X86_CPU(cpu);
        CPUX86State *env = &x86_cpu->env;
#elif defined(TARGET_PPC)
        PowerPCCPU *ppc_cpu = POWERPC_CPU(cpu);
        CPUPPCState *env = &ppc_cpu->env;
#elif defined(TARGET_SPARC)
        SPARCCPU *sparc_cpu = SPARC_CPU(cpu);
        CPUSPARCState *env = &sparc_cpu->env;
#elif defined(TARGET_MIPS)
        MIPSCPU *mips_cpu = MIPS_CPU(cpu);
        CPUMIPSState *env = &mips_cpu->env;
#endif

        /* Registers may be stale (e.g. under KVM); sync before reading. */
        cpu_synchronize_state(cpu);

        info = g_malloc0(sizeof(*info));
        info->value = g_malloc0(sizeof(*info->value));
        info->value->CPU = cpu->cpu_index;
        info->value->current = (cpu == first_cpu);
        info->value->halted = cpu->halted;
        info->value->thread_id = cpu->thread_id;
#if defined(TARGET_I386)
        /* Report the linear PC, i.e. EIP plus the CS base. */
        info->value->has_pc = true;
        info->value->pc = env->eip + env->segs[R_CS].base;
#elif defined(TARGET_PPC)
        info->value->has_nip = true;
        info->value->nip = env->nip;
#elif defined(TARGET_SPARC)
        info->value->has_pc = true;
        info->value->pc = env->pc;
        info->value->has_npc = true;
        info->value->npc = env->npc;
#elif defined(TARGET_MIPS)
        info->value->has_PC = true;
        info->value->PC = env->active_tc.PC;
#endif

        /* XXX: waiting for the qapi to support GSList */
        if (!cur_item) {
            head = cur_item = info;
        } else {
            cur_item->next = info;
            cur_item = info;
        }
    }

    return head;
}
/* Note: Must be called after VCPU initialization. */ void kvmclock_create(void) { X86CPU *cpu = X86_CPU(first_cpu); if (kvm_enabled() && cpu->env.features[FEAT_KVM] & ((1ULL << KVM_FEATURE_CLOCKSOURCE) | (1ULL << KVM_FEATURE_CLOCKSOURCE2))) { sysbus_create_simple(TYPE_KVM_CLOCK, -1, NULL); } }
/* XXX: fix it to restore all registers */
/*
 * Handle a TLB miss: let the x86 MMU walk fill the TLB, and raise the
 * recorded exception (with unwinding to retaddr) if the walk faulted.
 */
void tlb_fill(CPUState *cs, target_ulong addr, MMUAccessType access_type,
              int mmu_idx, uintptr_t retaddr)
{
    if (x86_cpu_handle_mmu_fault(cs, addr, access_type, mmu_idx) == 0) {
        return;
    }

    CPUX86State *env = &X86_CPU(cs)->env;
    raise_exception_err_ra(env, cs->exception_index, env->error_code, retaddr);
}
/* XXX: fix it to restore all registers */
/*
 * Handle a TLB miss (older variant taking is_write instead of an
 * MMUAccessType). On a fault, CPU state is first restored from the
 * host return address — but only when retaddr is non-zero, i.e. when
 * we faulted from generated code — before raising the exception.
 */
void tlb_fill(CPUState *cs, target_ulong addr, int is_write, int mmu_idx,
              uintptr_t retaddr)
{
    int ret;

    ret = x86_cpu_handle_mmu_fault(cs, addr, is_write, mmu_idx);
    if (ret) {
        X86CPU *cpu = X86_CPU(cs);
        CPUX86State *env = &cpu->env;

        if (retaddr) {
            /* now we have a real cpu fault */
            cpu_restore_state(cs, retaddr);
        }
        raise_exception_err(env, cs->exception_index, env->error_code);
    }
}
/*******************************************************************
int memfrs_get_virmem_content(CPUState *cpu, uint64_t cr3,
                              uint64_t target_addr,
                              uint64_t target_length, uint8_t *buf)

Read guest virtual memory into a caller-supplied buffer.

INPUT:  CPUState *cpu           Current cpu
        uint64_t cr3            CR3 value, 0 to use the cpu's own CR3
        uint64_t target_addr    Guest virtual address to read from
        uint64_t target_length  Number of bytes to read
        uint8_t *buf            Destination buffer (caller-owned, must
                                hold at least target_length bytes)
OUTPUT: int                     0 on success, -1 on read failure
*******************************************************************/
int memfrs_get_virmem_content( CPUState *cpu, uint64_t cr3, uint64_t target_addr, uint64_t target_length, uint8_t* buf)
{
    X86CPU copied_cpu;
    int ret;
    /*
     * NOTE(review): this is a shallow struct copy — pointer members of
     * X86CPU still reference the original CPU's data. That appears to
     * be sufficient for cpu_get_phys_page_debug-style walks, but
     * confirm it stays safe across QEMU versions.
     */
    memcpy(&copied_cpu, X86_CPU(cpu), sizeof(copied_cpu));

    /* Substitute the requested address space; 0 means "current CR3". */
    if(cr3 != 0) {
        copied_cpu.env.cr[3] = cr3;
    }

    ret = cpu_memory_rw_debug((CPUState *)&copied_cpu, target_addr, (uint8_t*)buf, target_length, 0);
    if(ret != 0) {
        //printf("Fail to read virtual memory\n");
        return -1;
    }
    return 0;
}
/*
 * Sync interrupt-related vCPU state back from Hyper-V after a run:
 * RFLAGS, the TPR (CR8), any pending interruption, and the interrupt
 * shadow. On a failed hypercall the vCPU is conservatively marked
 * non-interruptable.
 *
 * Fixes: typo in the error message ("regusters" -> "registers");
 * removed the redundant trailing return in a void function.
 */
static void whpx_vcpu_post_run(CPUState *cpu)
{
    HRESULT hr;
    struct whpx_state *whpx = &whpx_global;
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    WHV_REGISTER_VALUE reg_values[4];
    const WHV_REGISTER_NAME reg_names[4] = {
        WHvX64RegisterRflags,
        WHvX64RegisterCr8,
        WHvRegisterPendingInterruption,
        WHvRegisterInterruptState,
    };

    hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index,
                                         reg_names, 4, reg_values);
    if (FAILED(hr)) {
        error_report("WHPX: Failed to get interrupt state registers,"
                     " hr=%08lx", hr);
        vcpu->interruptable = false;
        return;
    }

    assert(reg_names[0] == WHvX64RegisterRflags);
    env->eflags = reg_values[0].Reg64;

    assert(reg_names[1] == WHvX64RegisterCr8);
    if (vcpu->tpr != reg_values[1].Reg64) {
        vcpu->tpr = reg_values[1].Reg64;
        /* APIC TPR updates must happen under the iothread lock. */
        qemu_mutex_lock_iothread();
        cpu_set_apic_tpr(x86_cpu->apic_state, vcpu->tpr);
        qemu_mutex_unlock_iothread();
    }

    assert(reg_names[2] == WHvRegisterPendingInterruption);
    vcpu->interrupt_in_flight = reg_values[2].PendingInterruption;

    assert(reg_names[3] == WHvRegisterInterruptState);
    vcpu->interruptable = !reg_values[3].InterruptState.InterruptShadow;
}
/*
 * Process asynchronous per-vCPU events before entering the guest:
 * INIT, APIC poll, pending HARD/NMI interrupts (un-halts the vCPU),
 * SIPI, and TPR-access reporting. The ordering of these checks is
 * deliberate and mirrors the interrupt_request bit handling.
 */
static void whpx_vcpu_process_async_events(CPUState *cpu)
{
    struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr);
    X86CPU *x86_cpu = X86_CPU(cpu);
    struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu);

    /* INIT is blocked while in SMM. */
    if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) &&
        !(env->hflags & HF_SMM_MASK)) {
        do_cpu_init(x86_cpu);
        cpu->vcpu_dirty = true;
        vcpu->interruptable = true;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_POLL) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_POLL;
        apic_poll_irq(x86_cpu->apic_state);
    }

    /* A deliverable external interrupt or an NMI wakes a halted vCPU. */
    if (((cpu->interrupt_request & CPU_INTERRUPT_HARD) &&
         (env->eflags & IF_MASK)) ||
        (cpu->interrupt_request & CPU_INTERRUPT_NMI)) {
        cpu->halted = false;
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) {
        /* SIPI works on the current register state; fetch it first. */
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        do_cpu_sipi(x86_cpu);
    }

    if (cpu->interrupt_request & CPU_INTERRUPT_TPR) {
        cpu->interrupt_request &= ~CPU_INTERRUPT_TPR;
        if (!cpu->vcpu_dirty) {
            whpx_get_registers(cpu);
        }
        apic_handle_tpr_access_report(x86_cpu->apic_state, env->eip,
                                      env->tpr_access_type);
    }

    return;
}
void do_get_physic_address(struct Monitor *mon, const struct QDict *qdict) { uint64_t target_cr3 = qdict_get_int(qdict, "cr3"); uint64_t target_addr = qdict_get_int(qdict, "addr"); X86CPU *cpu = X86_CPU(ENV_GET_CPU((CPUArchState*)mba_mon_get_cpu())); hwaddr page = target_addr & TARGET_PAGE_MASK; //XXX([email protected]):Only one phase copied. Should be fully copied to resist changes. X86CPU copied_cpu; memcpy(&copied_cpu, cpu, sizeof(copied_cpu)); copied_cpu.env.cr[3] = target_cr3; hwaddr phys_page = cpu_get_phys_page_debug((CPUState*)&copied_cpu, page); if (phys_page == -1) { monitor_printf(mon, "Cannot find physic page\n"); return; } hwaddr phys_addr = phys_page + (target_addr & ~TARGET_PAGE_MASK); monitor_printf(mon, "physic address = %p\n", (void*)phys_addr); }
// TODO: taskswitch handling static void save_state_to_tss32(CPUState *cpu, struct x86_tss_segment32 *tss) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; /* CR3 and ldt selector are not saved intentionally */ tss->eip = EIP(env); tss->eflags = EFLAGS(env); tss->eax = EAX(env); tss->ecx = ECX(env); tss->edx = EDX(env); tss->ebx = EBX(env); tss->esp = ESP(env); tss->ebp = EBP(env); tss->esi = ESI(env); tss->edi = EDI(env); tss->es = vmx_read_segment_selector(cpu, R_ES).sel; tss->cs = vmx_read_segment_selector(cpu, R_CS).sel; tss->ss = vmx_read_segment_selector(cpu, R_SS).sel; tss->ds = vmx_read_segment_selector(cpu, R_DS).sel; tss->fs = vmx_read_segment_selector(cpu, R_FS).sel; tss->gs = vmx_read_segment_selector(cpu, R_GS).sel; }
static bool apic_irq_delivered_needed(void *opaque) { APICCommonState *s = APIC_COMMON(opaque); return s->cpu == X86_CPU(first_cpu) && apic_irq_delivered != 0; }
/*
 * gdbstub register read callback for x86.
 * Fills mem_buf with register number n (gdb's numbering via gpr_map /
 * gpr_map32) and returns the number of bytes written, or 0 for an
 * unrecognised register. GPRs and EIP are 64-bit only when the CPU is
 * actually executing in 64-bit mode (HF_CS64_MASK).
 */
int x86_cpu_gdb_read_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            return gdb_get_reg64(mem_buf, env->regs[gpr_map[n]]);
        } else if (n < CPU_NB_REGS32) {
            return gdb_get_reg32(mem_buf, env->regs[gpr_map32[n]]);
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
        /* x87 registers are transferred as 10-byte extended precision. */
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(mem_buf, &env->fpregs[n - IDX_FP_REGS], 10);
#else
        memset(mem_buf, 0, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        /* XMM8-15 exist only in 64-bit mode. */
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            stq_p(mem_buf, env->xmm_regs[n].XMM_Q(0));
            stq_p(mem_buf + 8, env->xmm_regs[n].XMM_Q(1));
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                return gdb_get_reg64(mem_buf, env->eip);
            } else {
                return gdb_get_reg32(mem_buf, env->eip);
            }
        case IDX_FLAGS_REG:
            return gdb_get_reg32(mem_buf, env->eflags);

        case IDX_SEG_REGS:
            return gdb_get_reg32(mem_buf, env->segs[R_CS].selector);
        case IDX_SEG_REGS + 1:
            return gdb_get_reg32(mem_buf, env->segs[R_SS].selector);
        case IDX_SEG_REGS + 2:
            return gdb_get_reg32(mem_buf, env->segs[R_DS].selector);
        case IDX_SEG_REGS + 3:
            return gdb_get_reg32(mem_buf, env->segs[R_ES].selector);
        case IDX_SEG_REGS + 4:
            return gdb_get_reg32(mem_buf, env->segs[R_FS].selector);
        case IDX_SEG_REGS + 5:
            return gdb_get_reg32(mem_buf, env->segs[R_GS].selector);

        case IDX_FP_REGS + 8:
            return gdb_get_reg32(mem_buf, env->fpuc);
        case IDX_FP_REGS + 9:
            /* FSW with TOP (fpstt) merged back into bits 11-13. */
            return gdb_get_reg32(mem_buf, (env->fpus & ~0x3800) |
                                          (env->fpstt & 0x7) << 11);
        case IDX_FP_REGS + 10:
            return gdb_get_reg32(mem_buf, 0); /* ftag */
        case IDX_FP_REGS + 11:
            return gdb_get_reg32(mem_buf, 0); /* fiseg */
        case IDX_FP_REGS + 12:
            return gdb_get_reg32(mem_buf, 0); /* fioff */
        case IDX_FP_REGS + 13:
            return gdb_get_reg32(mem_buf, 0); /* foseg */
        case IDX_FP_REGS + 14:
            return gdb_get_reg32(mem_buf, 0); /* fooff */
        case IDX_FP_REGS + 15:
            return gdb_get_reg32(mem_buf, 0); /* fop */

        case IDX_MXCSR_REG:
            return gdb_get_reg32(mem_buf, env->mxcsr);
        }
    }
    return 0;
}
/*
 * gdbstub register write callback for x86.
 * Consumes a value for register number n from mem_buf and returns the
 * number of bytes read, or 0 for an unrecognised register. FPU status
 * words fiseg/fioff/foseg/fooff/fop and ftag are accepted but ignored.
 */
int x86_cpu_gdb_write_register(CPUState *cs, uint8_t *mem_buf, int n)
{
    X86CPU *cpu = X86_CPU(cs);
    CPUX86State *env = &cpu->env;
    uint32_t tmp;

    if (n < CPU_NB_REGS) {
        if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
            env->regs[gpr_map[n]] = ldtul_p(mem_buf);
            return sizeof(target_ulong);
        } else if (n < CPU_NB_REGS32) {
            /* 32-bit write: replace only the low half of the register. */
            n = gpr_map32[n];
            env->regs[n] &= ~0xffffffffUL;
            env->regs[n] |= (uint32_t)ldl_p(mem_buf);
            return 4;
        }
    } else if (n >= IDX_FP_REGS && n < IDX_FP_REGS + 8) {
#ifdef USE_X86LDOUBLE
        /* FIXME: byteswap float values - after fixing fpregs layout. */
        memcpy(&env->fpregs[n - IDX_FP_REGS], mem_buf, 10);
#endif
        return 10;
    } else if (n >= IDX_XMM_REGS && n < IDX_XMM_REGS + CPU_NB_REGS) {
        n -= IDX_XMM_REGS;
        if (n < CPU_NB_REGS32 ||
            (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK)) {
            env->xmm_regs[n].XMM_Q(0) = ldq_p(mem_buf);
            env->xmm_regs[n].XMM_Q(1) = ldq_p(mem_buf + 8);
            return 16;
        }
    } else {
        switch (n) {
        case IDX_IP_REG:
            if (TARGET_LONG_BITS == 64 && env->hflags & HF_CS64_MASK) {
                env->eip = ldq_p(mem_buf);
                return 8;
            } else {
                env->eip &= ~0xffffffffUL;
                env->eip |= (uint32_t)ldl_p(mem_buf);
                return 4;
            }
        case IDX_FLAGS_REG:
            env->eflags = ldl_p(mem_buf);
            return 4;

        case IDX_SEG_REGS:
            return x86_cpu_gdb_load_seg(cpu, R_CS, mem_buf);
        case IDX_SEG_REGS + 1:
            return x86_cpu_gdb_load_seg(cpu, R_SS, mem_buf);
        case IDX_SEG_REGS + 2:
            return x86_cpu_gdb_load_seg(cpu, R_DS, mem_buf);
        case IDX_SEG_REGS + 3:
            return x86_cpu_gdb_load_seg(cpu, R_ES, mem_buf);
        case IDX_SEG_REGS + 4:
            return x86_cpu_gdb_load_seg(cpu, R_FS, mem_buf);
        case IDX_SEG_REGS + 5:
            return x86_cpu_gdb_load_seg(cpu, R_GS, mem_buf);

        case IDX_FP_REGS + 8:
            cpu_set_fpuc(env, ldl_p(mem_buf));
            return 4;
        case IDX_FP_REGS + 9:
            /* Split FSW back into fpstt (TOP, bits 11-13) and fpus. */
            tmp = ldl_p(mem_buf);
            env->fpstt = (tmp >> 11) & 7;
            env->fpus = tmp & ~0x3800;
            return 4;
        case IDX_FP_REGS + 10: /* ftag */
            return 4;
        case IDX_FP_REGS + 11: /* fiseg */
            return 4;
        case IDX_FP_REGS + 12: /* fioff */
            return 4;
        case IDX_FP_REGS + 13: /* foseg */
            return 4;
        case IDX_FP_REGS + 14: /* fooff */
            return 4;
        case IDX_FP_REGS + 15: /* fop */
            return 4;

        case IDX_MXCSR_REG:
            cpu_set_mxcsr(env, ldl_p(mem_buf));
            return 4;
        }
    }
    /* Unrecognised register. */
    return 0;
}
/*
 * Unicorn register read: copy the value of register `regid` of the
 * first vCPU into `value`. The destination width depends on the
 * register (8/16/32/64 bits, or a uc_x86_mmr for descriptor tables).
 * Dispatch is first on uc->mode (16/32/64), then on regid; UC_MODE_16
 * handles only the segment selectors and falls through to the 32-bit
 * cases for everything else. Always returns 0.
 */
int x86_reg_read(struct uc_struct *uc, unsigned int regid, void *value)
{
    CPUState *mycpu = first_cpu;

    switch(uc->mode) {
    default:
        break;
    case UC_MODE_16:
        switch(regid) {
        default: break;
        case UC_X86_REG_ES: *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_ES].selector; return 0;
        case UC_X86_REG_SS: *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_SS].selector; return 0;
        case UC_X86_REG_DS: *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_DS].selector; return 0;
        case UC_X86_REG_FS: *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_FS].selector; return 0;
        case UC_X86_REG_GS: *(int16_t *)value = X86_CPU(uc, mycpu)->env.segs[R_GS].selector; return 0;
        }
        // fall-thru
    case UC_MODE_32:
        switch(regid) {
        default: break;
        /* Control and debug registers. */
        case UC_X86_REG_CR0 ... UC_X86_REG_CR4: *(int32_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0]; break;
        case UC_X86_REG_DR0 ... UC_X86_REG_DR7: *(int32_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0]; break;
        case UC_X86_REG_EFLAGS: *(int32_t *)value = X86_CPU(uc, mycpu)->env.eflags; break;
        /* GPRs with their 16-bit and 8-bit sub-register views. */
        case UC_X86_REG_EAX: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX]; break;
        case UC_X86_REG_AX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_AH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_AL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_EBX: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX]; break;
        case UC_X86_REG_BX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_BH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_BL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_ECX: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX]; break;
        case UC_X86_REG_CX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_CH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_CL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_EDX: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX]; break;
        case UC_X86_REG_DX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_DH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_DL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_ESP: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP]; break;
        case UC_X86_REG_SP: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); break;
        case UC_X86_REG_EBP: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP]; break;
        case UC_X86_REG_BP: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); break;
        case UC_X86_REG_ESI: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI]; break;
        case UC_X86_REG_SI: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); break;
        case UC_X86_REG_EDI: *(int32_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI]; break;
        case UC_X86_REG_DI: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); break;
        case UC_X86_REG_EIP: *(int32_t *)value = X86_CPU(uc, mycpu)->env.eip; break;
        case UC_X86_REG_IP: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip); break;
        /* Segment selectors. */
        case UC_X86_REG_CS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_CS].selector; break;
        case UC_X86_REG_DS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_DS].selector; break;
        case UC_X86_REG_SS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_SS].selector; break;
        case UC_X86_REG_ES: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_ES].selector; break;
        case UC_X86_REG_FS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_FS].selector; break;
        case UC_X86_REG_GS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_GS].selector; break;
        /* Descriptor-table registers are returned as uc_x86_mmr. */
        case UC_X86_REG_IDTR:
            ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.idt.limit;
            ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.idt.base;
            break;
        case UC_X86_REG_GDTR:
            ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.gdt.limit;
            ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.gdt.base;
            break;
        case UC_X86_REG_LDTR:
            ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.ldt.limit;
            ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.ldt.base;
            ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.ldt.selector;
            ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.ldt.flags;
            break;
        case UC_X86_REG_TR:
            ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.tr.limit;
            ((uc_x86_mmr *)value)->base = (uint32_t)X86_CPU(uc, mycpu)->env.tr.base;
            ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.tr.selector;
            ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.tr.flags;
            break;
        }
        break;
#ifdef TARGET_X86_64
    case UC_MODE_64:
        switch(regid) {
        default: break;
        /* Control and debug registers. */
        case UC_X86_REG_CR0 ... UC_X86_REG_CR4: *(int64_t *)value = X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0]; break;
        case UC_X86_REG_DR0 ... UC_X86_REG_DR7: *(int64_t *)value = X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0]; break;
        case UC_X86_REG_EFLAGS: *(int64_t *)value = X86_CPU(uc, mycpu)->env.eflags; break;
        /* 64-bit GPRs with 32/16/8-bit sub-register views. */
        case UC_X86_REG_RAX: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EAX]; break;
        case UC_X86_REG_EAX: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_AX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_AH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_AL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX]); break;
        case UC_X86_REG_RBX: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBX]; break;
        case UC_X86_REG_EBX: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_BX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_BH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_BL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX]); break;
        case UC_X86_REG_RCX: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ECX]; break;
        case UC_X86_REG_ECX: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_CX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_CH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_CL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX]); break;
        case UC_X86_REG_RDX: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDX]; break;
        case UC_X86_REG_EDX: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_DX: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_DH: *(int8_t *)value = READ_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_DL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX]); break;
        case UC_X86_REG_RSP: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESP]; break;
        case UC_X86_REG_ESP: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); break;
        case UC_X86_REG_SP: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP]); break;
        case UC_X86_REG_SPL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP]); break;
        case UC_X86_REG_RBP: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EBP]; break;
        case UC_X86_REG_EBP: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); break;
        case UC_X86_REG_BP: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP]); break;
        case UC_X86_REG_BPL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP]); break;
        case UC_X86_REG_RSI: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_ESI]; break;
        case UC_X86_REG_ESI: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); break;
        case UC_X86_REG_SI: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI]); break;
        case UC_X86_REG_SIL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI]); break;
        case UC_X86_REG_RDI: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.regs[R_EDI]; break;
        case UC_X86_REG_EDI: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); break;
        case UC_X86_REG_DI: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI]); break;
        case UC_X86_REG_DIL: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI]); break;
        case UC_X86_REG_RIP: *(uint64_t *)value = X86_CPU(uc, mycpu)->env.eip; break;
        case UC_X86_REG_EIP: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.eip); break;
        case UC_X86_REG_IP: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.eip); break;
        /* Segment selectors. */
        case UC_X86_REG_CS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_CS].selector; break;
        case UC_X86_REG_DS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_DS].selector; break;
        case UC_X86_REG_SS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_SS].selector; break;
        case UC_X86_REG_ES: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_ES].selector; break;
        case UC_X86_REG_FS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_FS].selector; break;
        case UC_X86_REG_GS: *(int16_t *)value = (uint16_t)X86_CPU(uc, mycpu)->env.segs[R_GS].selector; break;
        /* R8-R15 with 32/16/8-bit sub-register views. */
        case UC_X86_REG_R8: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[8]); break;
        case UC_X86_REG_R8D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[8]); break;
        case UC_X86_REG_R8W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[8]); break;
        case UC_X86_REG_R8B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8]); break;
        case UC_X86_REG_R9: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[9]); break;
        case UC_X86_REG_R9D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[9]); break;
        case UC_X86_REG_R9W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[9]); break;
        case UC_X86_REG_R9B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9]); break;
        case UC_X86_REG_R10: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[10]); break;
        case UC_X86_REG_R10D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[10]); break;
        case UC_X86_REG_R10W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[10]); break;
        case UC_X86_REG_R10B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10]); break;
        case UC_X86_REG_R11: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[11]); break;
        case UC_X86_REG_R11D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[11]); break;
        case UC_X86_REG_R11W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[11]); break;
        case UC_X86_REG_R11B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11]); break;
        case UC_X86_REG_R12: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[12]); break;
        case UC_X86_REG_R12D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[12]); break;
        case UC_X86_REG_R12W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[12]); break;
        case UC_X86_REG_R12B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12]); break;
        case UC_X86_REG_R13: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[13]); break;
        case UC_X86_REG_R13D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[13]); break;
        case UC_X86_REG_R13W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[13]); break;
        case UC_X86_REG_R13B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13]); break;
        case UC_X86_REG_R14: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[14]); break;
        case UC_X86_REG_R14D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[14]); break;
        case UC_X86_REG_R14W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[14]); break;
        case UC_X86_REG_R14B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14]); break;
        case UC_X86_REG_R15: *(int64_t *)value = READ_QWORD(X86_CPU(uc, mycpu)->env.regs[15]); break;
        case UC_X86_REG_R15D: *(int32_t *)value = READ_DWORD(X86_CPU(uc, mycpu)->env.regs[15]); break;
        case UC_X86_REG_R15W: *(int16_t *)value = READ_WORD(X86_CPU(uc, mycpu)->env.regs[15]); break;
        case UC_X86_REG_R15B: *(int8_t *)value = READ_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15]); break;
        /* Descriptor-table registers (full 64-bit base here). */
        case UC_X86_REG_IDTR:
            ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.idt.limit;
            ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.idt.base;
            break;
        case UC_X86_REG_GDTR:
            ((uc_x86_mmr *)value)->limit = (uint16_t)X86_CPU(uc, mycpu)->env.gdt.limit;
            ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.gdt.base;
            break;
        case UC_X86_REG_LDTR:
            ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.ldt.limit;
            ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.ldt.base;
            ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.ldt.selector;
            ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.ldt.flags;
            break;
        case UC_X86_REG_TR:
            ((uc_x86_mmr *)value)->limit = X86_CPU(uc, mycpu)->env.tr.limit;
            ((uc_x86_mmr *)value)->base = X86_CPU(uc, mycpu)->env.tr.base;
            ((uc_x86_mmr *)value)->selector = (uint16_t)X86_CPU(uc, mycpu)->env.tr.selector;
            ((uc_x86_mmr *)value)->flags = X86_CPU(uc, mycpu)->env.tr.flags;
            break;
        }
        break;
#endif
    }

    return 0;
}
bool x86_is_v8086(struct CPUState *cpu) { X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; return x86_is_protected(cpu) && (RFLAGS(env) & RFLAGS_VM); }
/*
 * x86_reg_write: write one guest register on the first vCPU.
 *
 * @uc:    Unicorn instance; the target vCPU is first_cpu.
 * @regid: UC_X86_REG_* identifier selecting the register.
 * @value: caller buffer holding the new value; its width is implied by
 *         regid (uint8_t .. uint64_t, or uc_x86_mmr for the descriptor
 *         table registers IDTR/GDTR/LDTR/TR).
 *
 * The accepted register set depends on uc->mode: UC_MODE_16 handles the
 * segment selectors and then falls through to the UC_MODE_32 cases;
 * UC_MODE_64 (compiled only with TARGET_X86_64) adds the 64-bit forms
 * and r8-r15.  Writing (E/R)IP additionally sets uc->quit_request and
 * calls uc_emu_stop() so cached translation state is flushed before
 * execution resumes at the new address.  Unhandled regids are silently
 * ignored; the function always returns 0.
 */
int x86_reg_write(struct uc_struct *uc, unsigned int regid, const void *value) { CPUState *mycpu = first_cpu; switch(uc->mode) { default: break; case UC_MODE_16: switch(regid) { default: break; case UC_X86_REG_ES: X86_CPU(uc, mycpu)->env.segs[R_ES].selector = *(uint16_t *)value; return 0; case UC_X86_REG_SS: X86_CPU(uc, mycpu)->env.segs[R_SS].selector = *(uint16_t *)value; return 0; case UC_X86_REG_DS: X86_CPU(uc, mycpu)->env.segs[R_DS].selector = *(uint16_t *)value; return 0; case UC_X86_REG_FS: X86_CPU(uc, mycpu)->env.segs[R_FS].selector = *(uint16_t *)value; return 0; case UC_X86_REG_GS: X86_CPU(uc, mycpu)->env.segs[R_GS].selector = *(uint16_t *)value; return 0; } // fall-thru
/* 16-bit mode falls through here for all non-segment registers. */
case UC_MODE_32: switch(regid) { default: break; case UC_X86_REG_CR0 ... UC_X86_REG_CR4: X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0] = *(uint32_t *)value; break; case UC_X86_REG_DR0 ... UC_X86_REG_DR7: X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0] = *(uint32_t *)value; break; case UC_X86_REG_EFLAGS: X86_CPU(uc, mycpu)->env.eflags = *(uint32_t *)value; X86_CPU(uc, mycpu)->env.eflags0 = *(uint32_t *)value; break; case UC_X86_REG_EAX: X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(uint32_t *)value; break; case UC_X86_REG_AX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint16_t *)value); break; case UC_X86_REG_AH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_AL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_EBX: X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(uint32_t *)value; break; case UC_X86_REG_BX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint16_t *)value); break; case UC_X86_REG_BH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_BL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_ECX: X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(uint32_t *)value; break; case UC_X86_REG_CX: WRITE_WORD(X86_CPU(uc, 
mycpu)->env.regs[R_ECX], *(uint16_t *)value); break; case UC_X86_REG_CH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_CL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_EDX: X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(uint32_t *)value; break; case UC_X86_REG_DX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint16_t *)value); break; case UC_X86_REG_DH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_DL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_ESP: X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(uint32_t *)value; break; case UC_X86_REG_SP: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint16_t *)value); break; case UC_X86_REG_EBP: X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(uint32_t *)value; break; case UC_X86_REG_BP: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint16_t *)value); break; case UC_X86_REG_ESI: X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(uint32_t *)value; break; case UC_X86_REG_SI: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint16_t *)value); break; case UC_X86_REG_EDI: X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(uint32_t *)value; break; case UC_X86_REG_DI: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint16_t *)value); break; case UC_X86_REG_EIP: X86_CPU(uc, mycpu)->env.eip = *(uint32_t *)value; // force to quit execution and flush TB
uc->quit_request = true; uc_emu_stop(uc); break; case UC_X86_REG_IP: WRITE_WORD(X86_CPU(uc, mycpu)->env.eip, *(uint16_t *)value); // force to quit execution and flush TB
uc->quit_request = true; uc_emu_stop(uc); break; case UC_X86_REG_CS: X86_CPU(uc, mycpu)->env.segs[R_CS].selector = *(uint16_t *)value; break; case UC_X86_REG_DS: X86_CPU(uc, mycpu)->env.segs[R_DS].selector = *(uint16_t *)value; break; case UC_X86_REG_SS: X86_CPU(uc, mycpu)->env.segs[R_SS].selector = *(uint16_t *)value; break; case UC_X86_REG_ES: X86_CPU(uc, 
mycpu)->env.segs[R_ES].selector = *(uint16_t *)value; break; case UC_X86_REG_FS: X86_CPU(uc, mycpu)->env.segs[R_FS].selector = *(uint16_t *)value; break; case UC_X86_REG_GS: X86_CPU(uc, mycpu)->env.segs[R_GS].selector = *(uint16_t *)value; break; case UC_X86_REG_IDTR: X86_CPU(uc, mycpu)->env.idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.idt.base = (uint32_t)((uc_x86_mmr *)value)->base; break; case UC_X86_REG_GDTR: X86_CPU(uc, mycpu)->env.gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.gdt.base = (uint32_t)((uc_x86_mmr *)value)->base; break; case UC_X86_REG_LDTR: X86_CPU(uc, mycpu)->env.ldt.limit = ((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.ldt.base = (uint32_t)((uc_x86_mmr *)value)->base; X86_CPU(uc, mycpu)->env.ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; X86_CPU(uc, mycpu)->env.ldt.flags = ((uc_x86_mmr *)value)->flags; break; case UC_X86_REG_TR: X86_CPU(uc, mycpu)->env.tr.limit = ((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.tr.base = (uint32_t)((uc_x86_mmr *)value)->base; X86_CPU(uc, mycpu)->env.tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; X86_CPU(uc, mycpu)->env.tr.flags = ((uc_x86_mmr *)value)->flags; break; } break; 
/* 64-bit register bank, only available on 64-bit capable builds. */
#ifdef TARGET_X86_64
case UC_MODE_64: switch(regid) { default: break; case UC_X86_REG_CR0 ... UC_X86_REG_CR4: X86_CPU(uc, mycpu)->env.cr[regid - UC_X86_REG_CR0] = *(uint64_t *)value; break; case UC_X86_REG_DR0 ... UC_X86_REG_DR7: X86_CPU(uc, mycpu)->env.dr[regid - UC_X86_REG_DR0] = *(uint64_t *)value; break; case UC_X86_REG_EFLAGS: X86_CPU(uc, mycpu)->env.eflags = *(uint64_t *)value; X86_CPU(uc, mycpu)->env.eflags0 = *(uint64_t *)value; break; case UC_X86_REG_RAX: X86_CPU(uc, mycpu)->env.regs[R_EAX] = *(uint64_t *)value; break; case UC_X86_REG_EAX: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint32_t *)value); break; case UC_X86_REG_AX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint16_t *)value); break; case UC_X86_REG_AH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_AL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EAX], *(uint8_t *)value); break; case UC_X86_REG_RBX: X86_CPU(uc, mycpu)->env.regs[R_EBX] = *(uint64_t *)value; break; case UC_X86_REG_EBX: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint32_t *)value); break; case UC_X86_REG_BX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint16_t *)value); break; case UC_X86_REG_BH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_BL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBX], *(uint8_t *)value); break; case UC_X86_REG_RCX: X86_CPU(uc, mycpu)->env.regs[R_ECX] = *(uint64_t *)value; break; case UC_X86_REG_ECX: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint32_t *)value); break; case UC_X86_REG_CX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint16_t *)value); break; case UC_X86_REG_CH: WRITE_BYTE_H(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_CL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ECX], *(uint8_t *)value); break; case UC_X86_REG_RDX: X86_CPU(uc, mycpu)->env.regs[R_EDX] = *(uint64_t *)value; break; case UC_X86_REG_EDX: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint32_t *)value); break; case UC_X86_REG_DX: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint16_t *)value); break; case UC_X86_REG_DH: WRITE_BYTE_H(X86_CPU(uc, 
mycpu)->env.regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_DL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDX], *(uint8_t *)value); break; case UC_X86_REG_RSP: X86_CPU(uc, mycpu)->env.regs[R_ESP] = *(uint64_t *)value; break; case UC_X86_REG_ESP: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint32_t *)value); break; case UC_X86_REG_SP: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint16_t *)value); break; case UC_X86_REG_SPL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESP], *(uint8_t *)value); break; case UC_X86_REG_RBP: X86_CPU(uc, mycpu)->env.regs[R_EBP] = *(uint64_t *)value; break; case UC_X86_REG_EBP: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint32_t *)value); break; case UC_X86_REG_BP: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint16_t *)value); break; case UC_X86_REG_BPL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EBP], *(uint8_t *)value); break; case UC_X86_REG_RSI: X86_CPU(uc, mycpu)->env.regs[R_ESI] = *(uint64_t *)value; break; case UC_X86_REG_ESI: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint32_t *)value); break; case UC_X86_REG_SI: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint16_t *)value); break; case UC_X86_REG_SIL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_ESI], *(uint8_t *)value); break; case UC_X86_REG_RDI: X86_CPU(uc, mycpu)->env.regs[R_EDI] = *(uint64_t *)value; break; case UC_X86_REG_EDI: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint32_t *)value); break; case UC_X86_REG_DI: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint16_t *)value); break; case UC_X86_REG_DIL: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[R_EDI], *(uint8_t *)value); break; case UC_X86_REG_RIP: X86_CPU(uc, mycpu)->env.eip = *(uint64_t *)value; // force to quit execution and flush TB
uc->quit_request = true; uc_emu_stop(uc); break; case UC_X86_REG_EIP: WRITE_DWORD(X86_CPU(uc, mycpu)->env.eip, *(uint32_t *)value); // force to quit execution and flush TB
uc->quit_request = true; uc_emu_stop(uc); break; case 
UC_X86_REG_IP: WRITE_WORD(X86_CPU(uc, mycpu)->env.eip, *(uint16_t *)value); // force to quit execution and flush TB
uc->quit_request = true; uc_emu_stop(uc); break; case UC_X86_REG_CS: X86_CPU(uc, mycpu)->env.segs[R_CS].selector = *(uint16_t *)value; break; case UC_X86_REG_DS: X86_CPU(uc, mycpu)->env.segs[R_DS].selector = *(uint16_t *)value; break; case UC_X86_REG_SS: X86_CPU(uc, mycpu)->env.segs[R_SS].selector = *(uint16_t *)value; break; case UC_X86_REG_ES: X86_CPU(uc, mycpu)->env.segs[R_ES].selector = *(uint16_t *)value; break; case UC_X86_REG_FS: X86_CPU(uc, mycpu)->env.segs[R_FS].selector = *(uint16_t *)value; break; case UC_X86_REG_GS: X86_CPU(uc, mycpu)->env.segs[R_GS].selector = *(uint16_t *)value; break; case UC_X86_REG_R8: X86_CPU(uc, mycpu)->env.regs[8] = *(uint64_t *)value; break; case UC_X86_REG_R8D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[8], *(uint32_t *)value); break; case UC_X86_REG_R8W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[8], *(uint16_t *)value); break; case UC_X86_REG_R8B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[8], *(uint8_t *)value); break; case UC_X86_REG_R9: X86_CPU(uc, mycpu)->env.regs[9] = *(uint64_t *)value; break; case UC_X86_REG_R9D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[9], *(uint32_t *)value); break; case UC_X86_REG_R9W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[9], *(uint16_t *)value); break; case UC_X86_REG_R9B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[9], *(uint8_t *)value); break; case UC_X86_REG_R10: X86_CPU(uc, mycpu)->env.regs[10] = *(uint64_t *)value; break; case UC_X86_REG_R10D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[10], *(uint32_t *)value); break; case UC_X86_REG_R10W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[10], *(uint16_t *)value); break; case UC_X86_REG_R10B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[10], *(uint8_t *)value); break; case UC_X86_REG_R11: X86_CPU(uc, mycpu)->env.regs[11] = *(uint64_t *)value; break; case UC_X86_REG_R11D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[11], *(uint32_t *)value); break; 
case UC_X86_REG_R11W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[11], *(uint16_t *)value); break; case UC_X86_REG_R11B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[11], *(uint8_t *)value); break; case UC_X86_REG_R12: X86_CPU(uc, mycpu)->env.regs[12] = *(uint64_t *)value; break; case UC_X86_REG_R12D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[12], *(uint32_t *)value); break; case UC_X86_REG_R12W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[12], *(uint16_t *)value); break; case UC_X86_REG_R12B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[12], *(uint8_t *)value); break; case UC_X86_REG_R13: X86_CPU(uc, mycpu)->env.regs[13] = *(uint64_t *)value; break; case UC_X86_REG_R13D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[13], *(uint32_t *)value); break; case UC_X86_REG_R13W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[13], *(uint16_t *)value); break; case UC_X86_REG_R13B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[13], *(uint8_t *)value); break; case UC_X86_REG_R14: X86_CPU(uc, mycpu)->env.regs[14] = *(uint64_t *)value; break; case UC_X86_REG_R14D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[14], *(uint32_t *)value); break; case UC_X86_REG_R14W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[14], *(uint16_t *)value); break; case UC_X86_REG_R14B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[14], *(uint8_t *)value); break; case UC_X86_REG_R15: X86_CPU(uc, mycpu)->env.regs[15] = *(uint64_t *)value; break; case UC_X86_REG_R15D: WRITE_DWORD(X86_CPU(uc, mycpu)->env.regs[15], *(uint32_t *)value); break; case UC_X86_REG_R15W: WRITE_WORD(X86_CPU(uc, mycpu)->env.regs[15], *(uint16_t *)value); break; case UC_X86_REG_R15B: WRITE_BYTE_L(X86_CPU(uc, mycpu)->env.regs[15], *(uint8_t *)value); break; case UC_X86_REG_IDTR: X86_CPU(uc, mycpu)->env.idt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.idt.base = ((uc_x86_mmr *)value)->base; break; case UC_X86_REG_GDTR: X86_CPU(uc, mycpu)->env.gdt.limit = (uint16_t)((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.gdt.base = ((uc_x86_mmr 
*)value)->base; break; case UC_X86_REG_LDTR: X86_CPU(uc, mycpu)->env.ldt.limit = ((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.ldt.base = ((uc_x86_mmr *)value)->base; X86_CPU(uc, mycpu)->env.ldt.selector = (uint16_t)((uc_x86_mmr *)value)->selector; X86_CPU(uc, mycpu)->env.ldt.flags = ((uc_x86_mmr *)value)->flags; break; case UC_X86_REG_TR: X86_CPU(uc, mycpu)->env.tr.limit = ((uc_x86_mmr *)value)->limit; X86_CPU(uc, mycpu)->env.tr.base = ((uc_x86_mmr *)value)->base; X86_CPU(uc, mycpu)->env.tr.selector = (uint16_t)((uc_x86_mmr *)value)->selector; X86_CPU(uc, mycpu)->env.tr.flags = ((uc_x86_mmr *)value)->flags; break; } break; 
#endif
} return 0; }
/*
 * vmx_handle_task_switch: emulate an x86 hardware task switch for an
 * HVF guest.
 *
 * @tss_sel:    selector of the destination TSS.
 * @reason:     TSR_* cause of the switch (IRET, JMP, CALL, IDT gate).
 * @gate_valid: whether @gate/@gate_type describe the IDT gate used.
 *
 * If the switch was not entered through a hardware exception/interrupt
 * or NMI gate, the instruction is simply skipped (RIP advanced by the
 * exit instruction length) and no switch is performed.  Otherwise the
 * function reads the current and next TSS descriptors, validates the
 * next descriptor (presence and minimum limit; panics with
 * "emulate_ts" on failure), clears the busy bit of the old TSS for
 * IRET/JMP, clears EFLAGS.NT for IRET, sets the busy bit of the new
 * TSS for everything except IRET, performs the 32-bit task switch
 * (16-bit TSS switching is unimplemented and panics), sets CR0.TS,
 * installs the new TR, and finally flushes the vCPU TLB/state.
 *
 * NOTE(review): the DPL check for IDT-gate entry only computes the
 * condition; the #GP path is commented out, and `ret` is never
 * examined — presumably intentional best-effort emulation, verify.
 */
void vmx_handle_task_switch(CPUState *cpu, x68_segment_selector tss_sel, int reason, bool gate_valid, uint8_t gate, uint64_t gate_type) { uint64_t rip = rreg(cpu->hvf_fd, HV_X86_RIP); if (!gate_valid || (gate_type != VMCS_INTR_T_HWEXCEPTION && gate_type != VMCS_INTR_T_HWINTR && gate_type != VMCS_INTR_T_NMI)) { int ins_len = rvmcs(cpu->hvf_fd, VMCS_EXIT_INSTRUCTION_LENGTH); macvm_set_rip(cpu, rip + ins_len); return; } load_regs(cpu); struct x86_segment_descriptor curr_tss_desc, next_tss_desc; int ret; x68_segment_selector old_tss_sel = vmx_read_segment_selector(cpu, R_TR); uint64_t old_tss_base = vmx_read_segment_base(cpu, R_TR); uint32_t desc_limit; struct x86_call_gate task_gate_desc; struct vmx_segment vmx_seg; X86CPU *x86_cpu = X86_CPU(cpu); CPUX86State *env = &x86_cpu->env; x86_read_segment_descriptor(cpu, &next_tss_desc, tss_sel); x86_read_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel); if (reason == TSR_IDT_GATE && gate_valid) { int dpl; ret = x86_read_call_gate(cpu, &task_gate_desc, gate); dpl = task_gate_desc.dpl; x68_segment_selector cs = vmx_read_segment_selector(cpu, R_CS); if (tss_sel.rpl > dpl || cs.rpl > dpl) ;//DPRINTF("emulate_gp");
} desc_limit = x86_segment_limit(&next_tss_desc); if (!next_tss_desc.p || ((desc_limit < 0x67 && (next_tss_desc.type & 8)) || desc_limit < 0x2b)) { VM_PANIC("emulate_ts"); } if (reason == TSR_IRET || reason == TSR_JMP) { curr_tss_desc.type &= ~(1 << 1); /* clear busy flag */ x86_write_segment_descriptor(cpu, &curr_tss_desc, old_tss_sel); } if (reason == TSR_IRET) EFLAGS(env) &= ~RFLAGS_NT; if (reason != TSR_CALL && reason != TSR_IDT_GATE) old_tss_sel.sel = 0xffff; if (reason != TSR_IRET) { next_tss_desc.type |= (1 << 1); /* set busy flag */ x86_write_segment_descriptor(cpu, &next_tss_desc, tss_sel); } if (next_tss_desc.type & 8) ret = task_switch_32(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); else //ret = task_switch_16(cpu, tss_sel, old_tss_sel, old_tss_base, &next_tss_desc); 
VM_PANIC("task_switch_16"); macvm_set_cr0(cpu->hvf_fd, rvmcs(cpu->hvf_fd, VMCS_GUEST_CR0) | CR0_TS); x86_segment_descriptor_to_vmx(cpu, tss_sel, &next_tss_desc, &vmx_seg); vmx_write_segment_descriptor(cpu, &vmx_seg, R_TR); store_regs(cpu); hv_vcpu_invalidate_tlb(cpu->hvf_fd); hv_vcpu_flush(cpu->hvf_fd); }
/*
 * whpx_vcpu_pre_run: prepare pending-event state before re-entering the
 * vCPU under the Windows Hypervisor Platform.
 *
 * Under the iothread lock this function:
 *  - converts a pending CPU_INTERRUPT_NMI into a WHvX64PendingNmi
 *    injection (vector 2) when no interruption is already in flight,
 *    and simply clears a pending CPU_INTERRUPT_SMI request;
 *  - sets cpu->exit_request for INIT (unless in SMM) and TPR-access
 *    requests so the vCPU drops out of its inner loop to service them;
 *  - if interruptible and IF is set, pops the next PIC interrupt and
 *    queues it as a WHvX64PendingInterrupt;
 *  - pushes the guest TPR into CR8 when the APIC TPR changed;
 *  - requests an interrupt-window (deliverability) notification when a
 *    hard interrupt is pending but could not be injected yet.
 * The collected registers (at most 3) are then written in one
 * WHvSetVirtualProcessorRegisters call after the lock is released;
 * a failure is reported but not fatal.
 */
static void whpx_vcpu_pre_run(CPUState *cpu) { HRESULT hr; struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); X86CPU *x86_cpu = X86_CPU(cpu); int irq; uint8_t tpr; WHV_X64_PENDING_INTERRUPTION_REGISTER new_int = {0}; UINT32 reg_count = 0; WHV_REGISTER_VALUE reg_values[3] = {0}; WHV_REGISTER_NAME reg_names[3]; qemu_mutex_lock_iothread(); /* Inject NMI */ if (!vcpu->interrupt_in_flight.InterruptionPending && cpu->interrupt_request & (CPU_INTERRUPT_NMI | CPU_INTERRUPT_SMI)) { if (cpu->interrupt_request & CPU_INTERRUPT_NMI) { cpu->interrupt_request &= ~CPU_INTERRUPT_NMI; vcpu->interruptable = false; new_int.InterruptionType = WHvX64PendingNmi; new_int.InterruptionPending = 1; new_int.InterruptionVector = 2; } if (cpu->interrupt_request & CPU_INTERRUPT_SMI) { cpu->interrupt_request &= ~CPU_INTERRUPT_SMI; } } /* * Force the VCPU out of its inner loop to process any INIT requests or * commit pending TPR access. 
*/ if (cpu->interrupt_request & (CPU_INTERRUPT_INIT | CPU_INTERRUPT_TPR)) { if ((cpu->interrupt_request & CPU_INTERRUPT_INIT) && !(env->hflags & HF_SMM_MASK)) { cpu->exit_request = 1; } if (cpu->interrupt_request & CPU_INTERRUPT_TPR) { cpu->exit_request = 1; } } /* Get pending hard interruption or replay one that was overwritten */ if (!vcpu->interrupt_in_flight.InterruptionPending && vcpu->interruptable && (env->eflags & IF_MASK)) { assert(!new_int.InterruptionPending); if (cpu->interrupt_request & CPU_INTERRUPT_HARD) { cpu->interrupt_request &= ~CPU_INTERRUPT_HARD; irq = cpu_get_pic_interrupt(env); if (irq >= 0) { new_int.InterruptionType = WHvX64PendingInterrupt; new_int.InterruptionPending = 1; new_int.InterruptionVector = irq; } } } /* Setup interrupt state if new one was prepared */ if (new_int.InterruptionPending) { reg_values[reg_count].PendingInterruption = new_int; reg_names[reg_count] = WHvRegisterPendingInterruption; reg_count += 1; } /* Sync the TPR to the CR8 if was modified during the intercept */ tpr = cpu_get_apic_tpr(x86_cpu->apic_state); if (tpr != vcpu->tpr) { vcpu->tpr = tpr; reg_values[reg_count].Reg64 = tpr; cpu->exit_request = 1; reg_names[reg_count] = WHvX64RegisterCr8; reg_count += 1; } /* Update the state of the interrupt delivery notification */ if (!vcpu->window_registered && cpu->interrupt_request & CPU_INTERRUPT_HARD) { reg_values[reg_count].DeliverabilityNotifications.InterruptNotification = 1; vcpu->window_registered = 1; reg_names[reg_count] = WHvX64RegisterDeliverabilityNotifications; reg_count += 1; } qemu_mutex_unlock_iothread(); if (reg_count) { hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, reg_names, reg_count, reg_values); if (FAILED(hr)) { error_report("WHPX: Failed to set interrupt state registers," " hr=%08lx", hr); } } return; }
/*
 * whpx_get_registers: copy the full architectural state of a vCPU from
 * the Windows Hypervisor Platform into QEMU's CPUX86State.
 *
 * Fetches every register named in whpx_register_names with a single
 * WHvGetVirtualProcessorRegisters call, then walks the result array in
 * the exact order of that table, asserting the expected name at each
 * step: 16 GPRs, RIP/RFLAGS, 6 segment + LDTR/TR, IDTR/GDTR,
 * CR0/2/3/4/8 (CR8 feeds the APIC TPR when changed), 16 XMM registers,
 * 8 FP/MMX registers (low 64 bits only), FP and XMM control/status,
 * and the MSR block (TSC, EFER, KERNEL_GS_BASE/LSTAR/CSTAR/SFMASK on
 * 64-bit builds, APIC base — synced back to the APIC device when it
 * changed — and the SYSENTER/STAR family).  Debug registers and
 * interrupt/event registers are intentionally skipped.  Must run on
 * the vCPU thread or with the vCPU stopped.
 */
static void whpx_get_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt; uint64_t tpr, apic_base; HRESULT hr; int idx = 0; int i; assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); hr = WHvGetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, whpx_register_names, RTL_NUMBER_OF(whpx_register_names), &vcxt.values[0]); if (FAILED(hr)) { error_report("WHPX: Failed to get virtual processor context, hr=%08lx", hr); } /* Indexes for first 16 registers match between HV and QEMU definitions */ for (idx = 0; idx < CPU_NB_REGS64; idx += 1) { env->regs[idx] = vcxt.values[idx].Reg64; } /* Same goes for RIP and RFLAGS */ assert(whpx_register_names[idx] == WHvX64RegisterRip); env->eip = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterRflags); env->eflags = vcxt.values[idx++].Reg64; /* Translate 6+4 segment registers. 
HV and QEMU order matches */ assert(idx == WHvX64RegisterEs); for (i = 0; i < 6; i += 1, idx += 1) { env->segs[i] = whpx_seg_h2q(&vcxt.values[idx].Segment); } assert(idx == WHvX64RegisterLdtr); env->ldt = whpx_seg_h2q(&vcxt.values[idx++].Segment); assert(idx == WHvX64RegisterTr); env->tr = whpx_seg_h2q(&vcxt.values[idx++].Segment); assert(idx == WHvX64RegisterIdtr); env->idt.base = vcxt.values[idx].Table.Base; env->idt.limit = vcxt.values[idx].Table.Limit; idx += 1; assert(idx == WHvX64RegisterGdtr); env->gdt.base = vcxt.values[idx].Table.Base; env->gdt.limit = vcxt.values[idx].Table.Limit; idx += 1; /* CR0, 2, 3, 4, 8 */ assert(whpx_register_names[idx] == WHvX64RegisterCr0); env->cr[0] = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterCr2); env->cr[2] = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterCr3); env->cr[3] = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterCr4); env->cr[4] = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterCr8); tpr = vcxt.values[idx++].Reg64; if (tpr != vcpu->tpr) { vcpu->tpr = tpr; cpu_set_apic_tpr(x86_cpu->apic_state, tpr); } /* 8 Debug Registers - Skipped */ /* 16 XMM registers */ assert(whpx_register_names[idx] == WHvX64RegisterXmm0); for (i = 0; i < 16; i += 1, idx += 1) { env->xmm_regs[i].ZMM_Q(0) = vcxt.values[idx].Reg128.Low64; env->xmm_regs[i].ZMM_Q(1) = vcxt.values[idx].Reg128.High64; } /* 8 FP registers */ assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0); for (i = 0; i < 8; i += 1, idx += 1) { env->fpregs[i].mmx.MMX_Q(0) = vcxt.values[idx].Fp.AsUINT128.Low64; /* env->fpregs[i].mmx.MMX_Q(1) = vcxt.values[idx].Fp.AsUINT128.High64; */ } /* FP control status register */ assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus); env->fpuc = vcxt.values[idx].FpControlStatus.FpControl; env->fpstt = (vcxt.values[idx].FpControlStatus.FpStatus >> 11) & 0x7; env->fpus = vcxt.values[idx].FpControlStatus.FpStatus & 
~0x3800; for (i = 0; i < 8; ++i) { env->fptags[i] = !((vcxt.values[idx].FpControlStatus.FpTag >> i) & 1); } env->fpop = vcxt.values[idx].FpControlStatus.LastFpOp; env->fpip = vcxt.values[idx].FpControlStatus.LastFpRip; idx += 1; /* XMM control status register */ assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus); env->mxcsr = vcxt.values[idx].XmmControlStatus.XmmStatusControl; idx += 1; /* MSRs */ assert(whpx_register_names[idx] == WHvX64RegisterTsc); env->tsc = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterEfer); env->efer = vcxt.values[idx++].Reg64; 
#ifdef TARGET_X86_64
assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase); env->kernelgsbase = vcxt.values[idx++].Reg64; 
#endif
assert(whpx_register_names[idx] == WHvX64RegisterApicBase); apic_base = vcxt.values[idx++].Reg64; if (apic_base != vcpu->apic_base) { vcpu->apic_base = apic_base; cpu_set_apic_base(x86_cpu->apic_state, vcpu->apic_base); } /* WHvX64RegisterPat - Skipped */ assert(whpx_register_names[idx] == WHvX64RegisterSysenterCs); env->sysenter_cs = vcxt.values[idx++].Reg64;; assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip); env->sysenter_eip = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp); env->sysenter_esp = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterStar); env->star = vcxt.values[idx++].Reg64; 
#ifdef TARGET_X86_64
assert(whpx_register_names[idx] == WHvX64RegisterLstar); env->lstar = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterCstar); env->cstar = vcxt.values[idx++].Reg64; assert(whpx_register_names[idx] == WHvX64RegisterSfmask); env->fmask = vcxt.values[idx++].Reg64; 
#endif
/* Interrupt / Event Registers - Skipped */ assert(idx == RTL_NUMBER_OF(whpx_register_names)); return; }
/*
 * whpx_set_registers: push QEMU's CPUX86State into the Windows
 * Hypervisor Platform vCPU — the inverse of whpx_get_registers().
 *
 * Fills a whpx_register_set in the exact order of whpx_register_names
 * (asserting the expected name at every step) and commits it with a
 * single WHvSetVirtualProcessorRegisters call: 16 GPRs, RIP/RFLAGS,
 * segment registers (translated with the v86/real-mode flags derived
 * from EFLAGS.VM and CR0.PE), LDTR/TR, IDTR/GDTR, CR0/2/3/4 plus CR8
 * from the cached APIC TPR, XMM and FP/MMX registers, FP and XMM
 * control/status words, and the MSR block (TSC, EFER, 64-bit-only
 * MSRs, APIC base, SYSENTER/STAR family).  Debug registers and
 * interrupt/event registers are intentionally skipped.  Must run on
 * the vCPU thread or with the vCPU stopped; a hypervisor failure is
 * reported but not fatal.
 */
static void whpx_set_registers(CPUState *cpu) { struct whpx_state *whpx = &whpx_global; struct whpx_vcpu *vcpu = get_whpx_vcpu(cpu); struct CPUX86State *env = (CPUArchState *)(cpu->env_ptr); X86CPU *x86_cpu = X86_CPU(cpu); struct whpx_register_set vcxt = {0}; HRESULT hr; int idx = 0; int i; int v86, r86; assert(cpu_is_stopped(cpu) || qemu_cpu_is_self(cpu)); v86 = (env->eflags & VM_MASK); r86 = !(env->cr[0] & CR0_PE_MASK); vcpu->tpr = cpu_get_apic_tpr(x86_cpu->apic_state); vcpu->apic_base = cpu_get_apic_base(x86_cpu->apic_state); /* Indexes for first 16 registers match between HV and QEMU definitions */ for (idx = 0; idx < CPU_NB_REGS64; idx += 1) { vcxt.values[idx].Reg64 = env->regs[idx]; } /* Same goes for RIP and RFLAGS */ assert(whpx_register_names[idx] == WHvX64RegisterRip); vcxt.values[idx++].Reg64 = env->eip; assert(whpx_register_names[idx] == WHvX64RegisterRflags); vcxt.values[idx++].Reg64 = env->eflags; /* Translate 6+4 segment registers. HV and QEMU order matches */ assert(idx == WHvX64RegisterEs); for (i = 0; i < 6; i += 1, idx += 1) { vcxt.values[idx].Segment = whpx_seg_q2h(&env->segs[i], v86, r86); } assert(idx == WHvX64RegisterLdtr); vcxt.values[idx++].Segment = whpx_seg_q2h(&env->ldt, 0, 0); assert(idx == WHvX64RegisterTr); vcxt.values[idx++].Segment = whpx_seg_q2h(&env->tr, 0, 0); assert(idx == WHvX64RegisterIdtr); vcxt.values[idx].Table.Base = env->idt.base; vcxt.values[idx].Table.Limit = env->idt.limit; idx += 1; assert(idx == WHvX64RegisterGdtr); vcxt.values[idx].Table.Base = env->gdt.base; vcxt.values[idx].Table.Limit = env->gdt.limit; idx += 1; /* CR0, 2, 3, 4, 8 */ assert(whpx_register_names[idx] == WHvX64RegisterCr0); vcxt.values[idx++].Reg64 = env->cr[0]; assert(whpx_register_names[idx] == WHvX64RegisterCr2); vcxt.values[idx++].Reg64 = env->cr[2]; assert(whpx_register_names[idx] == WHvX64RegisterCr3); vcxt.values[idx++].Reg64 = env->cr[3]; assert(whpx_register_names[idx] == WHvX64RegisterCr4); vcxt.values[idx++].Reg64 = env->cr[4]; 
assert(whpx_register_names[idx] == WHvX64RegisterCr8); vcxt.values[idx++].Reg64 = vcpu->tpr; /* 8 Debug Registers - Skipped */ /* 16 XMM registers */ assert(whpx_register_names[idx] == WHvX64RegisterXmm0); for (i = 0; i < 16; i += 1, idx += 1) { vcxt.values[idx].Reg128.Low64 = env->xmm_regs[i].ZMM_Q(0); vcxt.values[idx].Reg128.High64 = env->xmm_regs[i].ZMM_Q(1); } /* 8 FP registers */ assert(whpx_register_names[idx] == WHvX64RegisterFpMmx0); for (i = 0; i < 8; i += 1, idx += 1) { vcxt.values[idx].Fp.AsUINT128.Low64 = env->fpregs[i].mmx.MMX_Q(0); /* vcxt.values[idx].Fp.AsUINT128.High64 = env->fpregs[i].mmx.MMX_Q(1); */ } /* FP control status register */ assert(whpx_register_names[idx] == WHvX64RegisterFpControlStatus); vcxt.values[idx].FpControlStatus.FpControl = env->fpuc; vcxt.values[idx].FpControlStatus.FpStatus = (env->fpus & ~0x3800) | (env->fpstt & 0x7) << 11; vcxt.values[idx].FpControlStatus.FpTag = 0; for (i = 0; i < 8; ++i) { vcxt.values[idx].FpControlStatus.FpTag |= (!env->fptags[i]) << i; } vcxt.values[idx].FpControlStatus.Reserved = 0; vcxt.values[idx].FpControlStatus.LastFpOp = env->fpop; vcxt.values[idx].FpControlStatus.LastFpRip = env->fpip; idx += 1; /* XMM control status register */ assert(whpx_register_names[idx] == WHvX64RegisterXmmControlStatus); vcxt.values[idx].XmmControlStatus.LastFpRdp = 0; vcxt.values[idx].XmmControlStatus.XmmStatusControl = env->mxcsr; vcxt.values[idx].XmmControlStatus.XmmStatusControlMask = 0x0000ffff; idx += 1; /* MSRs */ assert(whpx_register_names[idx] == WHvX64RegisterTsc); vcxt.values[idx++].Reg64 = env->tsc; assert(whpx_register_names[idx] == WHvX64RegisterEfer); vcxt.values[idx++].Reg64 = env->efer; 
#ifdef TARGET_X86_64
assert(whpx_register_names[idx] == WHvX64RegisterKernelGsBase); vcxt.values[idx++].Reg64 = env->kernelgsbase; 
#endif
assert(whpx_register_names[idx] == WHvX64RegisterApicBase); vcxt.values[idx++].Reg64 = vcpu->apic_base; /* WHvX64RegisterPat - Skipped */ assert(whpx_register_names[idx] == 
WHvX64RegisterSysenterCs); vcxt.values[idx++].Reg64 = env->sysenter_cs; assert(whpx_register_names[idx] == WHvX64RegisterSysenterEip); vcxt.values[idx++].Reg64 = env->sysenter_eip; assert(whpx_register_names[idx] == WHvX64RegisterSysenterEsp); vcxt.values[idx++].Reg64 = env->sysenter_esp; assert(whpx_register_names[idx] == WHvX64RegisterStar); vcxt.values[idx++].Reg64 = env->star; 
#ifdef TARGET_X86_64
assert(whpx_register_names[idx] == WHvX64RegisterLstar); vcxt.values[idx++].Reg64 = env->lstar; assert(whpx_register_names[idx] == WHvX64RegisterCstar); vcxt.values[idx++].Reg64 = env->cstar; assert(whpx_register_names[idx] == WHvX64RegisterSfmask); vcxt.values[idx++].Reg64 = env->fmask; 
#endif
/* Interrupt / Event Registers - Skipped */ assert(idx == RTL_NUMBER_OF(whpx_register_names)); hr = WHvSetVirtualProcessorRegisters(whpx->partition, cpu->cpu_index, whpx_register_names, RTL_NUMBER_OF(whpx_register_names), &vcxt.values[0]); if (FAILED(hr)) { error_report("WHPX: Failed to set virtual processor context, hr=%08lx", hr); } return; }
/* * Ask hax kernel module to run the CPU for us till: * 1. Guest crash or shutdown * 2. Need QEMU's emulation like guest execute MMIO instruction * 3. Guest execute HLT * 4. QEMU have Signal/event pending * 5. An unknown VMX exit happens */ 
/*
 * Before entering the run loop, pending POLL/INIT/SIPI interrupt
 * requests are serviced (with state sync around INIT/SIPI).  Inside
 * the loop the iothread lock is dropped across hax_vcpu_run(), and
 * each tunnel exit status is dispatched: PIO/fast-MMIO are emulated,
 * guest shutdown requests a system shutdown, unknown VM exits request
 * a system reset, and HLT halts the vCPU unless a hard interrupt or
 * NMI is pending.  The loop ends when any handler sets ret != 0 or an
 * exit_request arrives.
 *
 * Returns 1 only when the loop ended with a negative (error) status
 * (`ret < 0`), 0 otherwise — including normal exits back to QEMU.
 */
static int hax_vcpu_hax_exec(CPUArchState *env) { int ret = 0; CPUState *cpu = ENV_GET_CPU(env); X86CPU *x86_cpu = X86_CPU(cpu); struct hax_vcpu_state *vcpu = cpu->hax_vcpu; struct hax_tunnel *ht = vcpu->tunnel; if (!hax_enabled()) { DPRINTF("Trying to vcpu execute at eip:" TARGET_FMT_lx "\n", env->eip); return 0; } cpu->halted = 0; if (cpu->interrupt_request & CPU_INTERRUPT_POLL) { cpu->interrupt_request &= ~CPU_INTERRUPT_POLL; apic_poll_irq(x86_cpu->apic_state); } if (cpu->interrupt_request & CPU_INTERRUPT_INIT) { DPRINTF("\nhax_vcpu_hax_exec: handling INIT for %d\n", cpu->cpu_index); do_cpu_init(x86_cpu); hax_vcpu_sync_state(env, 1); } if (cpu->interrupt_request & CPU_INTERRUPT_SIPI) { DPRINTF("hax_vcpu_hax_exec: handling SIPI for %d\n", cpu->cpu_index); hax_vcpu_sync_state(env, 0); do_cpu_sipi(x86_cpu); hax_vcpu_sync_state(env, 1); } do { int hax_ret; if (cpu->exit_request) { ret = 1; break; } hax_vcpu_interrupt(env); qemu_mutex_unlock_iothread(); cpu_exec_start(cpu); hax_ret = hax_vcpu_run(vcpu); cpu_exec_end(cpu); qemu_mutex_lock_iothread(); /* Simply continue the vcpu_run if system call interrupted */ if (hax_ret == -EINTR || hax_ret == -EAGAIN) { DPRINTF("io window interrupted\n"); continue; } if (hax_ret < 0) { fprintf(stderr, "vcpu run failed for vcpu %x\n", vcpu->vcpu_id); abort(); } switch (ht->_exit_status) { case HAX_EXIT_IO: ret = hax_handle_io(env, ht->pio._df, ht->pio._port, ht->pio._direction, ht->pio._size, ht->pio._count, vcpu->iobuf); break; case HAX_EXIT_FAST_MMIO: ret = hax_handle_fastmmio(env, (struct hax_fastmmio *) vcpu->iobuf); break; /* Guest state changed, currently only for shutdown */ case HAX_EXIT_STATECHANGE: fprintf(stdout, "VCPU shutdown request\n"); 
qemu_system_shutdown_request(SHUTDOWN_CAUSE_GUEST_SHUTDOWN); hax_vcpu_sync_state(env, 0); ret = 1; break; case HAX_EXIT_UNKNOWN_VMEXIT: fprintf(stderr, "Unknown VMX exit %x from guest\n", ht->_exit_reason); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); hax_vcpu_sync_state(env, 0); cpu_dump_state(cpu, stderr, fprintf, 0); ret = -1; break; case HAX_EXIT_HLT: if (!(cpu->interrupt_request & CPU_INTERRUPT_HARD) && !(cpu->interrupt_request & CPU_INTERRUPT_NMI)) { /* hlt instruction with interrupt disabled is shutdown */ env->eflags |= IF_MASK; cpu->halted = 1; cpu->exception_index = EXCP_HLT; ret = 1; } break; /* these situations will continue to hax module */ case HAX_EXIT_INTERRUPT: case HAX_EXIT_PAUSED: break; case HAX_EXIT_MMIO: /* Should not happen on UG system */ fprintf(stderr, "HAX: unsupported MMIO emulation\n"); ret = -1; break; case HAX_EXIT_REAL: /* Should not happen on UG system */ fprintf(stderr, "HAX: unimplemented real mode emulation\n"); ret = -1; break; default: fprintf(stderr, "Unknown exit %x from HAX\n", ht->_exit_status); qemu_system_reset_request(SHUTDOWN_CAUSE_GUEST_RESET); hax_vcpu_sync_state(env, 0); cpu_dump_state(cpu, stderr, fprintf, 0); ret = 1; break; } } while (!ret); if (cpu->exit_request) { cpu->exit_request = 0; cpu->exception_index = EXCP_INTERRUPT; } return ret < 0; }
/*
 * Report the hardware-capability bits advertised to guest ELF
 * binaries: the CPUID leaf-1 EDX feature word of the current thread's
 * CPU.
 */
static uint32_t get_elf_hwcap(void)
{
    return X86_CPU(thread_cpu)->env.features[FEAT_1_EDX];
}