void kvm_show_regs(kvm_context_t kvm, int vcpu)
{
    struct kvm_regs regs;
    int i;

    if (kvm_get_regs(kvm, vcpu, &regs))
        return;

    fprintf(stderr, "guest vcpu #%d\n", vcpu);
    fprintf(stderr, "pc:   %016"PRIx64" msr:  %016"PRIx64"\n",
            regs.pc, regs.msr);
    fprintf(stderr, "lr:   %016"PRIx64" ctr:  %016"PRIx64"\n",
            regs.lr, regs.ctr);
    fprintf(stderr, "srr0: %016"PRIx64" srr1: %016"PRIx64"\n",
            regs.srr0, regs.srr1);
    for (i = 0; i < 32; i += 4) {
        fprintf(stderr, "gpr%02d: %016"PRIx64" %016"PRIx64" %016"PRIx64
                " %016"PRIx64"\n", i,
                regs.gpr[i], regs.gpr[i + 1],
                regs.gpr[i + 2], regs.gpr[i + 3]);
    }

    /* All output above goes to stderr, so flush stderr rather than stdout. */
    fflush(stderr);
}
static int handle_cpuid(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
    struct kvm_regs regs;
    uint32_t orig_eax;
    uint64_t rax, rbx, rcx, rdx;
    int r;

    kvm_get_regs(kvm, vcpu, &regs);
    orig_eax = (uint32_t)regs.rax;
    rax = regs.rax;
    rbx = regs.rbx;
    rcx = regs.rcx;
    rdx = regs.rdx;
    r = kvm->callbacks->cpuid(kvm->opaque, &rax, &rbx, &rcx, &rdx);
    regs.rax = rax;
    regs.rbx = rbx;
    regs.rcx = rcx;
    regs.rdx = rdx;
    if (orig_eax == 1)
        regs.rdx &= ~(1ull << 12); /* disable mtrr support */
    kvm_set_regs(kvm, vcpu, &regs);
    run->emulated = 1;
    return r;
}
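/*
 * Hedged sketch, not part of the original source: one way a ->cpuid
 * callback compatible with the call in handle_cpuid() above could look.
 * The signature (opaque pointer plus uint64_t pointers, int return) is
 * inferred from how the callback is invoked; the pass-through to the host
 * CPUID instruction is only an illustration, since a real VMM would filter
 * which leaves it exposes to the guest.
 */
#include <stdint.h>

static int example_cpuid_cb(void *opaque, uint64_t *rax, uint64_t *rbx,
                            uint64_t *rcx, uint64_t *rdx)
{
    uint32_t eax = (uint32_t)*rax, ebx, ecx = (uint32_t)*rcx, edx;

    (void)opaque;

    /* Execute CPUID on the host for the requested leaf/subleaf. */
    asm volatile("cpuid"
                 : "=a"(eax), "=b"(ebx), "=c"(ecx), "=d"(edx)
                 : "a"(eax), "c"(ecx));

    *rax = eax;
    *rbx = ebx;
    *rcx = ecx;
    *rdx = edx;
    return 0;
}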
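/*
 * For reference: kvm_arch_save_regs() below indexes the XSAVE image as an
 * array of 32-bit words (xsave->region[]).  The values shown here follow
 * the architectural XSAVE layout (legacy FXSAVE area, XSTATE_BV header at
 * byte 512, AVX high halves at byte 576) and are given as an assumption
 * about what XSAVE_MXCSR and friends expand to, not copied from the file.
 */
#define XSAVE_MXCSR        6   /* byte offset 24:  MXCSR                  */
#define XSAVE_ST_SPACE     8   /* byte offset 32:  ST0..ST7 / MM0..MM7    */
#define XSAVE_XMM_SPACE   40   /* byte offset 160: XMM0..XMM15            */
#define XSAVE_XSTATE_BV  128   /* byte offset 512: XSTATE_BV header field */
#define XSAVE_YMMH_SPACE 144   /* byte offset 576: YMM high halves        */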
void kvm_arch_save_regs(CPUState *env)
{
    struct kvm_regs regs;
    struct kvm_fpu fpu;
    struct kvm_sregs sregs;
    struct kvm_msr_entry msrs[100];
    uint32_t hflags;
    uint32_t i, n, rc, bit;

    assert(kvm_cpu_is_stopped(env) || env->thread_id == kvm_get_thread_id());

    kvm_get_regs(env, &regs);

    env->regs[R_EAX] = regs.rax;
    env->regs[R_EBX] = regs.rbx;
    env->regs[R_ECX] = regs.rcx;
    env->regs[R_EDX] = regs.rdx;
    env->regs[R_ESI] = regs.rsi;
    env->regs[R_EDI] = regs.rdi;
    env->regs[R_ESP] = regs.rsp;
    env->regs[R_EBP] = regs.rbp;
#ifdef TARGET_X86_64
    env->regs[8] = regs.r8;
    env->regs[9] = regs.r9;
    env->regs[10] = regs.r10;
    env->regs[11] = regs.r11;
    env->regs[12] = regs.r12;
    env->regs[13] = regs.r13;
    env->regs[14] = regs.r14;
    env->regs[15] = regs.r15;
#endif

    env->eflags = regs.rflags;
    env->eip = regs.rip;

#ifdef KVM_CAP_XSAVE
    if (kvm_check_extension(kvm_state, KVM_CAP_XSAVE)) {
        struct kvm_xsave *xsave;
        uint16_t cwd, swd, twd, fop;

        xsave = qemu_memalign(4096, sizeof(struct kvm_xsave));
        kvm_get_xsave(env, xsave);
        cwd = (uint16_t)xsave->region[0];
        swd = (uint16_t)(xsave->region[0] >> 16);
        twd = (uint16_t)xsave->region[1];
        fop = (uint16_t)(xsave->region[1] >> 16);
        env->fpstt = (swd >> 11) & 7;
        env->fpus = swd;
        env->fpuc = cwd;
        for (i = 0; i < 8; ++i)
            env->fptags[i] = !((twd >> i) & 1);
        env->mxcsr = xsave->region[XSAVE_MXCSR];
        memcpy(env->fpregs, &xsave->region[XSAVE_ST_SPACE],
               sizeof env->fpregs);
        memcpy(env->xmm_regs, &xsave->region[XSAVE_XMM_SPACE],
               sizeof env->xmm_regs);
        env->xstate_bv = *(uint64_t *)&xsave->region[XSAVE_XSTATE_BV];
        memcpy(env->ymmh_regs, &xsave->region[XSAVE_YMMH_SPACE],
               sizeof env->ymmh_regs);
        if (kvm_check_extension(kvm_state, KVM_CAP_XCRS)) {
            struct kvm_xcrs xcrs;

            kvm_get_xcrs(env, &xcrs);
            if (xcrs.xcrs[0].xcr == 0)
                env->xcr0 = xcrs.xcrs[0].value;
        }
        qemu_free(xsave);
    } else {
static int handle_io(kvm_context_t kvm, struct kvm_run *run, int vcpu)
{
    uint16_t addr = run->io.port;
    struct kvm_regs regs;
    int first_time = 1;
    int delta;
    struct translation_cache tr;
    int _in = (run->io.direction == KVM_EXIT_IO_IN);
    int r;

    translation_cache_init(&tr);

    if (run->io.string || _in) {
        r = kvm_get_regs(kvm, vcpu, &regs);
        if (r == -1)
            return -1;
    }

    delta = run->io.string_down ? -run->io.size : run->io.size;

    while (more_io(run, first_time)) {
        void *value_addr;

        if (!run->io.string) {
            if (_in)
                value_addr = &regs.rax;
            else
                value_addr = &run->io.value;
        } else {
            r = translate(kvm, vcpu, &tr, run->io.address, &value_addr);
            if (r) {
                fprintf(stderr, "failed translating I/O address %llx\n",
                        run->io.address);
                return r;
            }
        }

        switch (run->io.direction) {
        case KVM_EXIT_IO_IN:
            switch (run->io.size) {
            case 1: {
                uint8_t value;
                r = kvm->callbacks->inb(kvm->opaque, addr, &value);
                *(uint8_t *)value_addr = value;
                break;
            }
            case 2: {
                uint16_t value;
                r = kvm->callbacks->inw(kvm->opaque, addr, &value);
                *(uint16_t *)value_addr = value;
                break;
            }
            case 4: {
                uint32_t value;
                r = kvm->callbacks->inl(kvm->opaque, addr, &value);
                *(uint32_t *)value_addr = value;
                break;
            }
            default:
                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                return -EMSGSIZE;
            }
            break;
        case KVM_EXIT_IO_OUT:
            switch (run->io.size) {
            case 1:
                r = kvm->callbacks->outb(kvm->opaque, addr,
                                         *(uint8_t *)value_addr);
                break;
            case 2:
                r = kvm->callbacks->outw(kvm->opaque, addr,
                                         *(uint16_t *)value_addr);
                break;
            case 4:
                r = kvm->callbacks->outl(kvm->opaque, addr,
                                         *(uint32_t *)value_addr);
                break;
            default:
                fprintf(stderr, "bad I/O size %d\n", run->io.size);
                return -EMSGSIZE;
            }
            break;
        default:
            fprintf(stderr, "bad I/O direction %d\n", run->io.direction);
            return -EPROTO;
        }

        if (run->io.string) {
            run->io.address += delta;
            switch (run->io.direction) {
            case KVM_EXIT_IO_IN:
                regs.rdi += delta;
                break;
            case KVM_EXIT_IO_OUT:
                regs.rsi += delta;
                break;
            }
            if (run->io.rep) {
                --regs.rcx;
                --run->io.count;
            }
        }
        first_time = 0;
        if (r) {
            int savedret = r;

            r = kvm_set_regs(kvm, vcpu, &regs);
            if (r == -1)
                return -1;
            return savedret;
        }
    }

    if (run->io.string || _in) {
        r = kvm_set_regs(kvm, vcpu, &regs);
        if (r == -1)
            return -1;
    }

    run->emulated = 1;
    return 0;
}
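/*
 * Hedged sketch, not copied from the original file: a more_io() helper
 * consistent with how handle_io() above uses it.  Non-string I/O is
 * handled exactly once (only on the first pass), while rep/string I/O
 * keeps iterating until the loop has counted run->io.count down to zero.
 */
#include <linux/kvm.h>

static int more_io(struct kvm_run *run, int first_time)
{
    if (!run->io.string)
        return first_time;
    return run->io.count != 0;
}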