static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu) { int i; int paramnr = 4; int ret = H_SUCCESS; mutex_lock(&vcpu->kvm->arch.hpt_mutex); for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) { unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i)); unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1); unsigned long pteg, rb, flags; unsigned long pte[2]; unsigned long v = 0; if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) { break; /* Exit success */ } else if ((tsh & H_BULK_REMOVE_TYPE) != H_BULK_REMOVE_REQUEST) { ret = H_PARAMETER; break; /* Exit fail */ } tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS; tsh |= H_BULK_REMOVE_RESPONSE; if ((tsh & H_BULK_REMOVE_ANDCOND) && (tsh & H_BULK_REMOVE_AVPN)) { tsh |= H_BULK_REMOVE_PARM; kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); ret = H_PARAMETER; break; /* Exit fail */ } pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); /* tsl = AVPN */ flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) || ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) { tsh |= H_BULK_REMOVE_NOT_FOUND; } else { /* Splat the pteg in (userland) hpt */ copy_to_user((void __user *)pteg, &v, sizeof(v)); rb = compute_tlbie_rb(pte[0], pte[1], tsh & H_BULK_REMOVE_PTEX); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); tsh |= H_BULK_REMOVE_SUCCESS; tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43; } kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh); } mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; }
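/* Illustrative standalone sketch, not part of the kernel sources: shows how a
 * hypothetical caller might pack one H_BULK_REMOVE "translation specifier"
 * doubleword (the tsh value consumed above) and decode the per-entry response
 * the handler writes back.  The BR_* names and bit positions are local to this
 * example and are assumptions mirroring the PAPR hvcall layout. */
#include <stdint.h>
#include <stdio.h>

#define BR_TYPE_MASK     0xc000000000000000ULL
#define BR_TYPE_REQUEST  0x4000000000000000ULL
#define BR_TYPE_RESPONSE 0x8000000000000000ULL
#define BR_TYPE_END      0xc000000000000000ULL
#define BR_CODE_MASK     0x3000000000000000ULL
#define BR_CODE_NOTFOUND 0x1000000000000000ULL
#define BR_FLAG_AVPN     0x0200000000000000ULL
#define BR_PTEX_MASK     0x00ffffffffffffffULL

/* Pack one request: type = REQUEST, optional AVPN match, HPTE index. */
static uint64_t pack_request(uint64_t ptex, int match_avpn)
{
    uint64_t tsh = BR_TYPE_REQUEST | (ptex & BR_PTEX_MASK);

    if (match_avpn)
        tsh |= BR_FLAG_AVPN;
    return tsh;
}

int main(void)
{
    uint64_t tsh = pack_request(0x123, 1);

    /* After the hcall the handler rewrites the type field to RESPONSE and
     * ORs in a completion code, as kvmppc_h_pr_bulk_remove() does above. */
    uint64_t resp = (tsh & ~BR_TYPE_MASK) | BR_TYPE_RESPONSE | BR_CODE_NOTFOUND;

    printf("request %#llx -> completion code %llu\n",
           (unsigned long long)tsh,
           (unsigned long long)((resp & BR_CODE_MASK) >> 60));
    return 0;
}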
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu) { unsigned long flags= kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long v = 0, pteg, rb; unsigned long pte[2]; long int ret; pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) || ((flags & H_ANDCOND) && (pte[0] & avpn) != 0)) goto done; copy_to_user((void __user *)pteg, &v, sizeof(v)); rb = compute_tlbie_rb(pte[0], pte[1], pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); ret = H_SUCCESS; kvmppc_set_gpr(vcpu, 4, pte[0]); kvmppc_set_gpr(vcpu, 5, pte[1]); done: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; }
int kvm_arch_vcpu_ioctl_set_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
    int i;

    vcpu_load(vcpu);

    vcpu->arch.pc = regs->pc;
    kvmppc_set_cr(vcpu, regs->cr);
    vcpu->arch.ctr = regs->ctr;
    vcpu->arch.lr = regs->lr;
    kvmppc_set_xer(vcpu, regs->xer);
    kvmppc_set_msr(vcpu, regs->msr);
    vcpu->arch.srr0 = regs->srr0;
    vcpu->arch.srr1 = regs->srr1;
    vcpu->arch.sprg0 = regs->sprg0;
    vcpu->arch.sprg1 = regs->sprg1;
    vcpu->arch.sprg2 = regs->sprg2;
    vcpu->arch.sprg3 = regs->sprg3;
    vcpu->arch.sprg4 = regs->sprg4;
    vcpu->arch.sprg5 = regs->sprg5;
    vcpu->arch.sprg6 = regs->sprg6;
    vcpu->arch.sprg7 = regs->sprg7;

    for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
        kvmppc_set_gpr(vcpu, i, regs->gpr[i]);

    vcpu_put(vcpu);

    return 0;
}
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { int i; int r; vcpu->arch.pc = 0; vcpu->arch.shared->msr = 0; vcpu->arch.shadow_msr = MSR_USER | MSR_DE | MSR_IS | MSR_DS; vcpu->arch.shared->pir = vcpu->vcpu_id; kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ vcpu->arch.shadow_pid = 1; /* Eye-catching numbers so we know if the guest takes an interrupt * before it's programmed its own IVPR/IVORs. */ vcpu->arch.ivpr = 0x55550000; for (i = 0; i < BOOKE_IRQPRIO_MAX; i++) vcpu->arch.ivor[i] = 0x7700 | i * 4; kvmppc_init_timing_stats(vcpu); r = kvmppc_core_vcpu_setup(vcpu); kvmppc_sanity_check(vcpu); return r; }
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu) { long flags = kvmppc_get_gpr(vcpu, 4); long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long pteg[2 * 8]; unsigned long pteg_addr, i, *hpte; long int ret; i = pte_index & 7; pte_index &= ~7UL; pteg_addr = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg)); hpte = pteg; ret = H_PTEG_FULL; if (likely((flags & H_EXACT) == 0)) { for (i = 0; ; ++i) { if (i == 8) goto done; if ((*hpte & HPTE_V_VALID) == 0) break; hpte += 2; } } else { hpte += i * 2; if (*hpte & HPTE_V_VALID) goto done; } hpte[0] = kvmppc_get_gpr(vcpu, 6); hpte[1] = kvmppc_get_gpr(vcpu, 7); pteg_addr += i * HPTE_SIZE; copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE); kvmppc_set_gpr(vcpu, 4, pte_index | i); ret = H_SUCCESS; done: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; }
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) kvmppc_complete_mmio_load(vcpu, run); vcpu->mmio_needed = 0; } else if (vcpu->arch.dcr_needed) { if (!vcpu->arch.dcr_is_write) kvmppc_complete_dcr_load(vcpu, run); vcpu->arch.dcr_needed = 0; } else if (vcpu->arch.osi_needed) { u64 *gprs = run->osi.gprs; int i; for (i = 0; i < 32; i++) kvmppc_set_gpr(vcpu, i, gprs[i]); vcpu->arch.osi_needed = 0; } else if (vcpu->arch.hcall_needed) { int i; kvmppc_set_gpr(vcpu, 3, run->papr_hcall.ret); for (i = 0; i < 9; ++i) kvmppc_set_gpr(vcpu, 4 + i, run->papr_hcall.args[i]); vcpu->arch.hcall_needed = 0; #ifdef CONFIG_BOOKE } else if (vcpu->arch.epr_needed) { kvmppc_set_epr(vcpu, run->epr.epr); vcpu->arch.epr_needed = 0; #endif } r = kvmppc_vcpu_run(run, vcpu); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; }
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { int emulated = EMULATE_DONE; switch (sprn) { case SPRN_PID: kvmppc_set_gpr(vcpu, rt, vcpu->arch.pid); break; case SPRN_MMUCR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.mmucr); break; case SPRN_CCR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr0); break; case SPRN_CCR1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ccr1); break; default: emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); } kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); return emulated; }
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); int r; unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); unsigned long r2 = 0; if (!(vcpu->arch.shared->msr & MSR_SF)) { /* 32 bit mode */ param1 &= 0xffffffff; param2 &= 0xffffffff; param3 &= 0xffffffff; param4 &= 0xffffffff; } switch (nr) { case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): { vcpu->arch.magic_page_pa = param1; vcpu->arch.magic_page_ea = param2; r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; r = EV_SUCCESS; break; } case KVM_HCALL_TOKEN(KVM_HC_FEATURES): r = EV_SUCCESS; #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) /* XXX Missing magic page on 44x */ r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); #endif /* Second return value is in r4 */ break; case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); break; default: r = EV_UNIMPLEMENTED; break; } kvmppc_set_gpr(vcpu, 4, r2); return r; }
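/* Illustrative standalone sketch, not kernel code: the case labels matched in
 * kvmppc_kvm_pv() above (KVM_HCALL_TOKEN()/EV_HCALL_TOKEN()) are ePAPR-style
 * tokens that pack a 16-bit vendor ID above a 16-bit call number in r11.
 * The vendor value used below (42 for KVM) is an assumption recalled from
 * epapr_hcalls.h; treat both numbers as placeholders. */
#include <stdint.h>
#include <stdio.h>

static uint32_t ex_hcall_token(uint16_t vendor, uint16_t nr)
{
    return ((uint32_t)vendor << 16) | nr;
}

int main(void)
{
    uint32_t token = ex_hcall_token(42, 1);    /* hypothetical KVM call #1 */

    printf("token = %#x (vendor %u, call %u)\n",
           (unsigned int)token, (unsigned int)(token >> 16),
           (unsigned int)(token & 0xffff));
    return 0;
}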
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
    unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
    unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
    unsigned long tce = kvmppc_get_gpr(vcpu, 6);
    long rc;

    rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
    if (rc == H_TOO_HARD)
        return EMULATE_FAIL;
    kvmppc_set_gpr(vcpu, 3, rc);
    return EMULATE_DONE;
}
static int emulate_mfdcr(struct kvm_vcpu *vcpu, int rt, int dcrn) { /* The guest may access CPR0 registers to determine the timebase * frequency, and it must know the real host frequency because it * can directly access the timebase registers. * * It would be possible to emulate those accesses in userspace, * but userspace can really only figure out the end frequency. * We could decompose that into the factors that compute it, but * that's tricky math, and it's easier to just report the real * CPR0 values. */ switch (dcrn) { case DCRN_CPR0_CONFIG_ADDR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); break; case DCRN_CPR0_CONFIG_DATA: local_irq_disable(); mtdcr(DCRN_CPR0_CONFIG_ADDR, vcpu->arch.cpr0_cfgaddr); kvmppc_set_gpr(vcpu, rt, mfdcr(DCRN_CPR0_CONFIG_DATA)); local_irq_enable(); break; default: vcpu->run->dcr.dcrn = dcrn; vcpu->run->dcr.data = 0; vcpu->run->dcr.is_write = 0; vcpu->arch.dcr_is_write = 0; vcpu->arch.io_gpr = rt; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); return EMULATE_DO_DCR; } return EMULATE_DONE; }
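/* Minimal standalone sketch, not kernel code: models the indirect
 * address/data register pairing that emulate_mfdcr() relies on above
 * (write an index to DCRN_CPR0_CONFIG_ADDR, then read
 * DCRN_CPR0_CONFIG_DATA).  All names and sizes here are made up for
 * illustration only. */
#include <stdint.h>
#include <stdio.h>

struct toy_cpr0 {
    uint32_t addr;      /* last value written to the address register */
    uint32_t regs[8];   /* backing store for the indirect registers   */
};

static void toy_mtdcr_addr(struct toy_cpr0 *c, uint32_t idx)
{
    c->addr = idx;
}

static uint32_t toy_mfdcr_data(const struct toy_cpr0 *c)
{
    return c->regs[c->addr & 7];
}

int main(void)
{
    struct toy_cpr0 c = { .regs = { [3] = 0xdeadbeef } };

    /* Same sequence as the emulation: select the register, then read it. */
    toy_mtdcr_addr(&c, 3);
    printf("CPR0[3] = %#x\n", (unsigned int)toy_mfdcr_data(&c));
    return 0;
}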
/* Initial guest state: 16MB mapping 0 -> 0, PC = 0, MSR = 0, R1 = 16MB */ int kvm_arch_vcpu_setup(struct kvm_vcpu *vcpu) { vcpu->arch.pc = 0; vcpu->arch.msr = 0; kvmppc_set_gpr(vcpu, 1, (16<<20) - 8); /* -8 for the callee-save LR slot */ vcpu->arch.shadow_pid = 1; /* Eye-catching number so we know if the guest takes an interrupt * before it's programmed its own IVPR. */ vcpu->arch.ivpr = 0x55550000; kvmppc_init_timing_stats(vcpu); return kvmppc_core_vcpu_setup(vcpu); }
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); int r; unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); unsigned long r2 = 0; if (!(vcpu->arch.shared->msr & MSR_SF)) { /* 32 bit mode */ param1 &= 0xffffffff; param2 &= 0xffffffff; param3 &= 0xffffffff; param4 &= 0xffffffff; } switch (nr) { case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE: { vcpu->arch.magic_page_pa = param1; vcpu->arch.magic_page_ea = param2; r2 = KVM_MAGIC_FEAT_SR; r = HC_EV_SUCCESS; break; } case HC_VENDOR_KVM | KVM_HC_FEATURES: r = HC_EV_SUCCESS; #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500) /* XXX Missing magic page on 44x */ r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); #endif /* Second return value is in r4 */ break; default: r = HC_EV_UNIMPLEMENTED; break; } kvmppc_set_gpr(vcpu, 4, r2); return r; }
int kvmppc_h_pr(struct kvm_vcpu *vcpu, unsigned long cmd) { int rc, idx; switch (cmd) { case H_ENTER: return kvmppc_h_pr_enter(vcpu); case H_REMOVE: return kvmppc_h_pr_remove(vcpu); case H_PROTECT: return kvmppc_h_pr_protect(vcpu); case H_BULK_REMOVE: return kvmppc_h_pr_bulk_remove(vcpu); case H_PUT_TCE: return kvmppc_h_pr_put_tce(vcpu); case H_CEDE: vcpu->arch.shared->msr |= MSR_EE; kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); vcpu->stat.halt_wakeup++; return EMULATE_DONE; case H_XIRR: case H_CPPR: case H_EOI: case H_IPI: case H_IPOLL: case H_XIRR_X: if (kvmppc_xics_enabled(vcpu)) return kvmppc_h_pr_xics_hcall(vcpu, cmd); break; case H_RTAS: if (list_empty(&vcpu->kvm->arch.rtas_tokens)) break; idx = srcu_read_lock(&vcpu->kvm->srcu); rc = kvmppc_rtas_hcall(vcpu); srcu_read_unlock(&vcpu->kvm->srcu, idx); if (rc) break; kvmppc_set_gpr(vcpu, 3, 0); return EMULATE_DONE; } return EMULATE_FAIL; }
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu) { unsigned long flags = kvmppc_get_gpr(vcpu, 4); unsigned long pte_index = kvmppc_get_gpr(vcpu, 5); unsigned long avpn = kvmppc_get_gpr(vcpu, 6); unsigned long rb, pteg, r, v; unsigned long pte[2]; long int ret; pteg = get_pteg_addr(vcpu, pte_index); mutex_lock(&vcpu->kvm->arch.hpt_mutex); copy_from_user(pte, (void __user *)pteg, sizeof(pte)); ret = H_NOT_FOUND; if ((pte[0] & HPTE_V_VALID) == 0 || ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn)) goto done; v = pte[0]; r = pte[1]; r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI | HPTE_R_KEY_LO); r |= (flags << 55) & HPTE_R_PP0; r |= (flags << 48) & HPTE_R_KEY_HI; r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO); pte[1] = r; rb = compute_tlbie_rb(v, r, pte_index); vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false); copy_to_user((void __user *)pteg, pte, sizeof(pte)); ret = H_SUCCESS; done: mutex_unlock(&vcpu->kvm->arch.hpt_mutex); kvmppc_set_gpr(vcpu, 3, ret); return EMULATE_DONE; }
int kvm_arch_vcpu_ioctl_run(struct kvm_vcpu *vcpu, struct kvm_run *run) { int r; sigset_t sigsaved; if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &vcpu->sigset, &sigsaved); if (vcpu->mmio_needed) { if (!vcpu->mmio_is_write) kvmppc_complete_mmio_load(vcpu, run); vcpu->mmio_needed = 0; } else if (vcpu->arch.dcr_needed) { if (!vcpu->arch.dcr_is_write) kvmppc_complete_dcr_load(vcpu, run); vcpu->arch.dcr_needed = 0; } else if (vcpu->arch.osi_needed) { u64 *gprs = run->osi.gprs; int i; for (i = 0; i < 32; i++) kvmppc_set_gpr(vcpu, i, gprs[i]); vcpu->arch.osi_needed = 0; } kvmppc_core_deliver_interrupts(vcpu); local_irq_disable(); kvm_guest_enter(); r = __kvmppc_vcpu_run(run, vcpu); kvm_guest_exit(); local_irq_enable(); if (vcpu->sigset_active) sigprocmask(SIG_SETMASK, &sigsaved, NULL); return r; }
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu) { int nr = kvmppc_get_gpr(vcpu, 11); int r; unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3); unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4); unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5); unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6); unsigned long r2 = 0; if (!(kvmppc_get_msr(vcpu) & MSR_SF)) { /* 32 bit mode */ param1 &= 0xffffffff; param2 &= 0xffffffff; param3 &= 0xffffffff; param4 &= 0xffffffff; } switch (nr) { case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE): { #if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE) /* Book3S can be little endian, find it out here */ int shared_big_endian = true; if (vcpu->arch.intr_msr & MSR_LE) shared_big_endian = false; if (shared_big_endian != vcpu->arch.shared_big_endian) kvmppc_swab_shared(vcpu); vcpu->arch.shared_big_endian = shared_big_endian; #endif if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) { /* * Older versions of the Linux magic page code had * a bug where they would map their trampoline code * NX. If that's the case, remove !PR NX capability. */ vcpu->arch.disable_kernel_nx = true; kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu); } vcpu->arch.magic_page_pa = param1 & ~0xfffULL; vcpu->arch.magic_page_ea = param2 & ~0xfffULL; #ifdef CONFIG_PPC_64K_PAGES /* * Make sure our 4k magic page is in the same window of a 64k * page within the guest and within the host's page. */ if ((vcpu->arch.magic_page_pa & 0xf000) != ((ulong)vcpu->arch.shared & 0xf000)) { void *old_shared = vcpu->arch.shared; ulong shared = (ulong)vcpu->arch.shared; void *new_shared; shared &= PAGE_MASK; shared |= vcpu->arch.magic_page_pa & 0xf000; new_shared = (void*)shared; memcpy(new_shared, old_shared, 0x1000); vcpu->arch.shared = new_shared; } #endif r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7; r = EV_SUCCESS; break; } case KVM_HCALL_TOKEN(KVM_HC_FEATURES): r = EV_SUCCESS; #if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2) r2 |= (1 << KVM_FEATURE_MAGIC_PAGE); #endif /* Second return value is in r4 */ break; case EV_HCALL_TOKEN(EV_IDLE): r = EV_SUCCESS; kvm_vcpu_block(vcpu); clear_bit(KVM_REQ_UNHALT, &vcpu->requests); break; default: r = EV_UNIMPLEMENTED; break; } kvmppc_set_gpr(vcpu, 4, r2); return r; }
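/* Illustrative standalone sketch of the CONFIG_PPC_64K_PAGES check above:
 * with 64k host pages, the 4k magic page must occupy the same 4k slot of
 * its 64k page as the shared area, so one 64k translation can cover both.
 * The slot is simply bits 12..15 of the address; the addresses below are
 * made-up examples. */
#include <stdint.h>
#include <stdio.h>

static unsigned int slot_in_64k_page(uint64_t addr)
{
    return (unsigned int)((addr & 0xf000) >> 12);
}

int main(void)
{
    uint64_t magic_pa = 0x0000000003fe6000ULL;  /* example guest address */
    uint64_t shared   = 0x00000000c012a000ULL;  /* example host address  */

    printf("magic slot %u, shared slot %u -> %s\n",
           slot_in_64k_page(magic_pa), slot_in_64k_page(shared),
           slot_in_64k_page(magic_pa) == slot_in_64k_page(shared) ?
           "compatible" : "shared area must be moved");
    return 0;
}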
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int rs = get_rs(inst); int rt = get_rt(inst); switch (get_op(inst)) { case 19: switch (get_xop(inst)) { case OP_19_XOP_RFI: kvmppc_emul_rfi(vcpu); kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS); *advance = 0; break; case OP_19_XOP_RFCI: kvmppc_emul_rfci(vcpu); kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS); *advance = 0; break; default: emulated = EMULATE_FAIL; break; } break; case 31: switch (get_xop(inst)) { case OP_31_XOP_MFMSR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr); kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS); break; case OP_31_XOP_MTMSR: kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS); kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs)); break; case OP_31_XOP_WRTEE: vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | (kvmppc_get_gpr(vcpu, rs) & MSR_EE); kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); break; case OP_31_XOP_WRTEEI: vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE) | (inst & MSR_EE); kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS); break; default: emulated = EMULATE_FAIL; } break; default: emulated = EMULATE_FAIL; } return emulated; }
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu, unsigned int inst, int *advance) { int emulated = EMULATE_DONE; int dcrn; int ra; int rb; int rc; int rs; int rt; int ws; switch (get_op(inst)) { case 31: switch (get_xop(inst)) { case XOP_MFDCR: dcrn = get_dcrn(inst); rt = get_rt(inst); /* The guest may access CPR0 registers to determine the timebase * frequency, and it must know the real host frequency because it * can directly access the timebase registers. * * It would be possible to emulate those accesses in userspace, * but userspace can really only figure out the end frequency. * We could decompose that into the factors that compute it, but * that's tricky math, and it's easier to just report the real * CPR0 values. */ switch (dcrn) { case DCRN_CPR0_CONFIG_ADDR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr); break; case DCRN_CPR0_CONFIG_DATA: local_irq_disable(); mtdcr(DCRN_CPR0_CONFIG_ADDR, vcpu->arch.cpr0_cfgaddr); kvmppc_set_gpr(vcpu, rt, mfdcr(DCRN_CPR0_CONFIG_DATA)); local_irq_enable(); break; default: run->dcr.dcrn = dcrn; run->dcr.data = 0; run->dcr.is_write = 0; vcpu->arch.io_gpr = rt; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); emulated = EMULATE_DO_DCR; } break; case XOP_MTDCR: dcrn = get_dcrn(inst); rs = get_rs(inst); /* emulate some access in kernel */ switch (dcrn) { case DCRN_CPR0_CONFIG_ADDR: vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs); break; default: run->dcr.dcrn = dcrn; run->dcr.data = kvmppc_get_gpr(vcpu, rs); run->dcr.is_write = 1; vcpu->arch.dcr_needed = 1; kvmppc_account_exit(vcpu, DCR_EXITS); emulated = EMULATE_DO_DCR; } break; case XOP_TLBWE: ra = get_ra(inst); rs = get_rs(inst); ws = get_ws(inst); emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws); break; case XOP_TLBSX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); rc = get_rc(inst); emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc); break; case XOP_ICCCI: break; default: emulated = EMULATE_FAIL; } break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance); return emulated; }
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); enum emulation_result emulated = EMULATE_DONE; int ax_rd = inst_get_field(inst, 6, 10); int ax_ra = inst_get_field(inst, 11, 15); int ax_rb = inst_get_field(inst, 16, 20); int ax_rc = inst_get_field(inst, 21, 25); short full_d = inst_get_field(inst, 16, 31); u64 *fpr_d = &vcpu->arch.fpr[ax_rd]; u64 *fpr_a = &vcpu->arch.fpr[ax_ra]; u64 *fpr_b = &vcpu->arch.fpr[ax_rb]; u64 *fpr_c = &vcpu->arch.fpr[ax_rc]; bool rcomp = (inst & 1) ? true : false; u32 cr = kvmppc_get_cr(vcpu); #ifdef DEBUG int i; #endif if (!kvmppc_inst_is_paired_single(vcpu, inst)) return EMULATE_FAIL; if (!(vcpu->arch.shared->msr & MSR_FP)) { kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL); return EMULATE_AGAIN; } kvmppc_giveup_ext(vcpu, MSR_FP); preempt_disable(); enable_kernel_fp(); /* Do we need to clear FE0 / FE1 here? Don't think so. */ #ifdef DEBUG for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { u32 f; kvm_cvt_df(&vcpu->arch.fpr[i], &f); dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n", i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]); } #endif switch (get_op(inst)) { case OP_PSQ_L: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_LU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_PSQ_ST: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); break; } case OP_PSQ_STU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 16, 16) ? true : false; int i = inst_get_field(inst, 17, 19); addr += get_d_signext(inst); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case 4: /* X form */ switch (inst_get_field(inst, 21, 30)) { case OP_4X_PS_CMPU0: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PSQ_LX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 21, 21) ? true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); break; } case OP_4X_PS_CMPO0: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PSQ_LUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 21, 21) ? 
true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_4X_PS_NEG: vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] ^= 0x80000000; break; case OP_4X_PS_CMPU1: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PS_MR: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_CMPO1: /* XXX */ emulated = EMULATE_FAIL; break; case OP_4X_PS_NABS: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] |= 0x80000000; break; case OP_4X_PS_ABS: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb]; vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; vcpu->arch.qpr[ax_rd] &= ~0x80000000; break; case OP_4X_PS_MERGE00: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ kvm_cvt_df(&vcpu->arch.fpr[ax_rb], &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE01: WARN_ON(rcomp); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra]; vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; case OP_4X_PS_MERGE10: WARN_ON(rcomp); /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], &vcpu->arch.fpr[ax_rd]); /* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */ kvm_cvt_df(&vcpu->arch.fpr[ax_rb], &vcpu->arch.qpr[ax_rd]); break; case OP_4X_PS_MERGE11: WARN_ON(rcomp); /* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */ kvm_cvt_fd(&vcpu->arch.qpr[ax_ra], &vcpu->arch.fpr[ax_rd]); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb]; break; } /* XW form */ switch (inst_get_field(inst, 25, 30)) { case OP_4XW_PSQ_STX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; bool w = inst_get_field(inst, 21, 21) ? true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); break; } case OP_4XW_PSQ_STUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra); bool w = inst_get_field(inst, 21, 21) ? 
true : false; int i = inst_get_field(inst, 22, 24); addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } } /* A form */ switch (inst_get_field(inst, 26, 30)) { case OP_4A_PS_SUM1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds); vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc]; break; case OP_4A_PS_SUM0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds); vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc]; break; case OP_4A_PS_MULS0: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls); break; case OP_4A_PS_MULS1: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_LOW, fps_fmuls); break; case OP_4A_PS_MADDS0: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds); break; case OP_4A_PS_MADDS1: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds); break; case OP_4A_PS_DIV: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fdivs); break; case OP_4A_PS_SUB: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fsubs); break; case OP_4A_PS_ADD: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rb, SCALAR_NONE, fps_fadds); break; case OP_4A_PS_SEL: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel); break; case OP_4A_PS_RES: emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, ax_rb, fps_fres); break; case OP_4A_PS_MUL: emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, SCALAR_NONE, fps_fmuls); break; case OP_4A_PS_RSQRTE: emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd, ax_rb, fps_frsqrte); break; case OP_4A_PS_MSUB: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs); break; case OP_4A_PS_MADD: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds); break; case OP_4A_PS_NMSUB: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs); break; case OP_4A_PS_NMADD: emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd, ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds); break; } break; /* Real FPU operations */ case OP_LFS: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_LFSU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_LFD: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_LFDU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_STFS: { ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_STFSU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_STFD: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_STFDU: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d; emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case 31: switch (inst_get_field(inst, 21, 30)) { case OP_31_LFSX: { ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0; addr += kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_31_LFSUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_LFDX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_31_LFDUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFSX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); break; } case OP_31_STFSUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFX: { ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); break; } case OP_31_STFUX: { ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_DOUBLE); if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, ax_ra, addr); break; } case OP_31_STFIWX: { ulong addr = (ax_ra ? 
kvmppc_get_gpr(vcpu, ax_ra) : 0) + kvmppc_get_gpr(vcpu, ax_rb); emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr, FPU_LS_SINGLE_LOW); break; } break; } break; case 59: switch (inst_get_field(inst, 21, 30)) { case OP_59_FADDS: fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FSUBS: fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FDIVS: fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRES: fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FRSQRTES: fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } switch (inst_get_field(inst, 26, 30)) { case OP_59_FMULS: fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMSUBS: fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FMADDS: fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMSUBS: fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_59_FNMADDS: fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; } break; case 63: switch (inst_get_field(inst, 21, 30)) { case OP_63_MTFSB0: case OP_63_MTFSB1: case OP_63_MCRFS: case OP_63_MTFSFI: /* XXX need to implement */ break; case OP_63_MFFS: /* XXX missing CR */ *fpr_d = vcpu->arch.fpscr; break; case OP_63_MTFSF: /* XXX missing fm bits */ /* XXX missing CR */ vcpu->arch.fpscr = *fpr_b; break; case OP_63_FCMPU: { u32 tmp_cr; u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FCMPO: { u32 tmp_cr; u32 cr0_mask = 0xf0000000; u32 cr_shift = inst_get_field(inst, 6, 8) * 4; fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b); cr &= ~(cr0_mask >> cr_shift); cr |= (cr & cr0_mask) >> cr_shift; break; } case OP_63_FNEG: fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FMR: *fpr_d = *fpr_b; break; case OP_63_FABS: fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCPSGN: fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FDIV: fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FADD: fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FSUB: fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b); break; case OP_63_FCTIW: fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FCTIWZ: fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); break; case OP_63_FRSP: fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); kvmppc_sync_qpr(vcpu, ax_rd); break; case OP_63_FRSQRTE: { double one = 1.0f; /* fD = sqrt(fB) */ fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b); /* fD = 1.0f / fD */ fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d); break; } } switch (inst_get_field(inst, 26, 30)) { case OP_63_FMUL: fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c); break; case OP_63_FSEL: fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMSUB: fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FMADD: fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case 
OP_63_FNMSUB: fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; case OP_63_FNMADD: fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b); break; } break; } #ifdef DEBUG for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) { u32 f; kvm_cvt_df(&vcpu->arch.fpr[i], &f); dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f); } #endif if (rcomp) kvmppc_set_cr(vcpu, cr); preempt_enable(); return emulated; }
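/* Standalone sketch, not kernel code: the OP_63_FRSQRTE case above emulates
 * the estimate instruction with an exact fsqrt followed by fdiv.  Since
 * frsqrte only has to return an approximation of 1/sqrt(b), the exact value
 * trivially satisfies the architecture's tolerance.  Build with -lm. */
#include <math.h>
#include <stdio.h>

static double emulated_frsqrte(double b)
{
    /* fD = sqrt(fB); fD = 1.0 / fD -- same two steps as the emulation. */
    return 1.0 / sqrt(b);
}

int main(void)
{
    printf("frsqrte(4.0)  ~= %f\n", emulated_frsqrte(4.0));
    printf("frsqrte(0.25) ~= %f\n", emulated_frsqrte(0.25));
    return 0;
}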
int kvmppc_booke_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { int emulated = EMULATE_DONE; switch (sprn) { case SPRN_IVPR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivpr); break; case SPRN_DEAR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->dar); break; case SPRN_ESR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->esr); break; case SPRN_DBCR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr0); break; case SPRN_DBCR1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbcr1); break; case SPRN_DBSR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.dbsr); break; case SPRN_TSR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.tsr); break; case SPRN_TCR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.tcr); break; case SPRN_IVOR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL]); break; case SPRN_IVOR1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK]); break; case SPRN_IVOR2: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE]); break; case SPRN_IVOR3: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE]); break; case SPRN_IVOR4: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL]); break; case SPRN_IVOR5: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT]); break; case SPRN_IVOR6: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM]); break; case SPRN_IVOR7: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL]); break; case SPRN_IVOR8: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL]); break; case SPRN_IVOR9: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL]); break; case SPRN_IVOR10: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER]); break; case SPRN_IVOR11: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_FIT]); break; case SPRN_IVOR12: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG]); break; case SPRN_IVOR13: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS]); break; case SPRN_IVOR14: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS]); break; case SPRN_IVOR15: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG]); break; default: emulated = EMULATE_FAIL; } return emulated; }
/* XXX to do: * lhax * lhaux * lswx * lswi * stswx * stswi * lha * lhau * lmw * stmw * */ int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu) { struct kvm_run *run = vcpu->run; u32 inst; int ra, rs, rt; enum emulation_result emulated; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); emulated = kvmppc_get_last_inst(vcpu, false, &inst); if (emulated != EMULATE_DONE) return emulated; ra = get_ra(inst); rs = get_rs(inst); rt = get_rt(inst); switch (get_op(inst)) { case 31: switch (get_xop(inst)) { case OP_31_XOP_LWZX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STWX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_LHAX: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STHX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_DCBST: case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_STWBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: emulated = EMULATE_FAIL; break; } break; case OP_LWZ: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ case OP_LD: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); break; case OP_LWZU: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LBZ: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STW: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. 
*/ case OP_STD: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 8, 1); break; case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STB: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHZ: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHA: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STH: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; default: emulated = EMULATE_FAIL; break; } if (emulated == EMULATE_FAIL) { advance = 0; kvmppc_core_queue_program(vcpu, 0); } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
static int kvmppc_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { enum emulation_result emulated = EMULATE_DONE; ulong spr_val = 0; switch (sprn) { case SPRN_SRR0: spr_val = vcpu->arch.shared->srr0; break; case SPRN_SRR1: spr_val = vcpu->arch.shared->srr1; break; case SPRN_PVR: spr_val = vcpu->arch.pvr; break; case SPRN_PIR: spr_val = vcpu->vcpu_id; break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: spr_val = get_tb() >> 32; break; case SPRN_TBWU: spr_val = get_tb(); break; case SPRN_SPRG0: spr_val = vcpu->arch.shared->sprg0; break; case SPRN_SPRG1: spr_val = vcpu->arch.shared->sprg1; break; case SPRN_SPRG2: spr_val = vcpu->arch.shared->sprg2; break; case SPRN_SPRG3: spr_val = vcpu->arch.shared->sprg3; break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. */ case SPRN_DEC: spr_val = kvmppc_get_dec(vcpu, get_tb()); break; default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val); if (unlikely(emulated == EMULATE_FAIL)) { printk(KERN_INFO "mfspr: unknown spr " "0x%x\n", sprn); } break; } if (emulated == EMULATE_DONE) kvmppc_set_gpr(vcpu, rt, spr_val); kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); return emulated; }
static void kvmppc_complete_dcr_load(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
    kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, run->dcr.data);
}
static int kvmppc_h_pr_xics_hcall(struct kvm_vcpu *vcpu, u32 cmd)
{
    long rc = kvmppc_xics_hcall(vcpu, cmd);

    kvmppc_set_gpr(vcpu, 3, rc);
    return EMULATE_DONE;
}
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { u64 uninitialized_var(gpr); if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); return; } if (vcpu->arch.mmio_is_bigendian) { switch (run->mmio.len) { case 8: gpr = *(u64 *)run->mmio.data; break; case 4: gpr = *(u32 *)run->mmio.data; break; case 2: gpr = *(u16 *)run->mmio.data; break; case 1: gpr = *(u8 *)run->mmio.data; break; } } else { /* Convert BE data from userland back to LE. */ switch (run->mmio.len) { case 4: gpr = ld_le32((u32 *)run->mmio.data); break; case 2: gpr = ld_le16((u16 *)run->mmio.data); break; case 1: gpr = *(u8 *)run->mmio.data; break; } } if (vcpu->arch.mmio_sign_extend) { switch (run->mmio.len) { #ifdef CONFIG_PPC64 case 4: gpr = (s64)(s32)gpr; break; #endif case 2: gpr = (s64)(s16)gpr; break; case 1: gpr = (s64)(s8)gpr; break; } } kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: vcpu->arch.fpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif default: BUG(); } }
static void kvmppc_complete_mmio_load(struct kvm_vcpu *vcpu, struct kvm_run *run) { u64 uninitialized_var(gpr); if (run->mmio.len > sizeof(gpr)) { printk(KERN_ERR "bad MMIO length: %d\n", run->mmio.len); return; } if (!vcpu->arch.mmio_host_swabbed) { switch (run->mmio.len) { case 8: gpr = *(u64 *)run->mmio.data; break; case 4: gpr = *(u32 *)run->mmio.data; break; case 2: gpr = *(u16 *)run->mmio.data; break; case 1: gpr = *(u8 *)run->mmio.data; break; } } else { switch (run->mmio.len) { case 8: gpr = swab64(*(u64 *)run->mmio.data); break; case 4: gpr = swab32(*(u32 *)run->mmio.data); break; case 2: gpr = swab16(*(u16 *)run->mmio.data); break; case 1: gpr = *(u8 *)run->mmio.data; break; } } if (vcpu->arch.mmio_sign_extend) { switch (run->mmio.len) { #ifdef CONFIG_PPC64 case 4: gpr = (s64)(s32)gpr; break; #endif case 2: gpr = (s64)(s16)gpr; break; case 1: gpr = (s64)(s8)gpr; break; } } kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); switch (vcpu->arch.io_gpr & KVM_MMIO_REG_EXT_MASK) { case KVM_MMIO_REG_GPR: kvmppc_set_gpr(vcpu, vcpu->arch.io_gpr, gpr); break; case KVM_MMIO_REG_FPR: VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; break; #ifdef CONFIG_PPC_BOOK3S case KVM_MMIO_REG_QPR: vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; case KVM_MMIO_REG_FQPR: VCPU_FPR(vcpu, vcpu->arch.io_gpr & KVM_MMIO_REG_MASK) = gpr; vcpu->arch.qpr[vcpu->arch.io_gpr & KVM_MMIO_REG_MASK] = gpr; break; #endif default: BUG(); } }
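/* Minimal standalone sketch, not kernel code: mirrors the two fix-up steps
 * kvmppc_complete_mmio_load() applies to a 2-byte MMIO load above -- undo a
 * host byte swap when the access and host endianness disagreed, then widen
 * the value into a 64-bit register image.  Function name and flags are
 * local to this example. */
#include <stdint.h>
#include <stdio.h>

static int64_t mmio_fixup_u16(uint16_t v, int host_swabbed, int sign_extend)
{
    /* undo the host byte swap if load and host endianness disagreed */
    if (host_swabbed)
        v = (uint16_t)((v >> 8) | (v << 8));   /* equivalent of swab16() */
    /* widen into the 64-bit GPR image handed to kvmppc_set_gpr() */
    return sign_extend ? (int64_t)(int16_t)v : (int64_t)v;
}

int main(void)
{
    uint16_t v = 0x80ff;   /* halfword as read with a native load */

    printf("as-is, sign-extended:   %lld\n", (long long)mmio_fixup_u16(v, 0, 1));
    printf("swabbed, zero-extended: %lld\n", (long long)mmio_fixup_u16(v, 1, 0));
    return 0;
}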
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); int ra = get_ra(inst); int rs = get_rs(inst); int rt = get_rt(inst); int sprn = get_sprn(inst); enum emulation_result emulated = EMULATE_DONE; int advance = 1; ulong spr_val = 0; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_TRAP: #ifdef CONFIG_64BIT case OP_31_XOP_TRAP_64: #endif #ifdef CONFIG_PPC_BOOK3S kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case OP_31_XOP_LWZX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STWX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_LHAX: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MFSPR: switch (sprn) { case SPRN_SRR0: spr_val = vcpu->arch.shared->srr0; break; case SPRN_SRR1: spr_val = vcpu->arch.shared->srr1; break; case SPRN_PVR: spr_val = vcpu->arch.pvr; break; case SPRN_PIR: spr_val = vcpu->vcpu_id; break; case SPRN_MSSSR0: spr_val = 0; break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: spr_val = get_tb() >> 32; break; case SPRN_TBWU: spr_val = get_tb(); break; case SPRN_SPRG0: spr_val = vcpu->arch.shared->sprg0; break; case SPRN_SPRG1: spr_val = vcpu->arch.shared->sprg1; break; case SPRN_SPRG2: spr_val = vcpu->arch.shared->sprg2; break; case SPRN_SPRG3: spr_val = vcpu->arch.shared->sprg3; break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. 
*/ case SPRN_DEC: spr_val = kvmppc_get_dec(vcpu, get_tb()); break; default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val); if (unlikely(emulated == EMULATE_FAIL)) { printk(KERN_INFO "mfspr: unknown spr " "0x%x\n", sprn); } break; } kvmppc_set_gpr(vcpu, rt, spr_val); kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); break; case OP_31_XOP_STHX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MTSPR: spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_SRR0: vcpu->arch.shared->srr0 = spr_val; break; case SPRN_SRR1: vcpu->arch.shared->srr1 = spr_val; break; /* XXX We need to context-switch the timebase for * watchdog and FIT. */ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_MSSSR0: break; case SPRN_DEC: vcpu->arch.dec = spr_val; kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.shared->sprg0 = spr_val; break; case SPRN_SPRG1: vcpu->arch.shared->sprg1 = spr_val; break; case SPRN_SPRG2: vcpu->arch.shared->sprg2 = spr_val; break; case SPRN_SPRG3: vcpu->arch.shared->sprg3 = spr_val; break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, spr_val); if (emulated == EMULATE_FAIL) printk(KERN_INFO "mtspr: unknown spr " "0x%x\n", sprn); break; } kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); break; case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case OP_LWZ: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ case OP_LD: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); break; case OP_LWZU: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LBZ: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STW: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. 
*/ case OP_STD: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 8, 1); break; case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STB: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHZ: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHA: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STH: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); kvmppc_core_queue_program(vcpu, 0); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
int kvmppc_core_emulate_mfspr(struct kvm_vcpu *vcpu, int sprn, int rt) { struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu); int emulated = EMULATE_DONE; unsigned long val; switch (sprn) { case SPRN_PID: kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[0]); break; case SPRN_PID1: kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[1]); break; case SPRN_PID2: kvmppc_set_gpr(vcpu, rt, vcpu_e500->pid[2]); break; case SPRN_MAS0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas0); break; case SPRN_MAS1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas1); break; case SPRN_MAS2: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas2); break; case SPRN_MAS3: val = (u32)vcpu->arch.shared->mas7_3; kvmppc_set_gpr(vcpu, rt, val); break; case SPRN_MAS4: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas4); break; case SPRN_MAS6: kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->mas6); break; case SPRN_MAS7: val = vcpu->arch.shared->mas7_3 >> 32; kvmppc_set_gpr(vcpu, rt, val); break; case SPRN_TLB0CFG: kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb0cfg); break; case SPRN_TLB1CFG: kvmppc_set_gpr(vcpu, rt, vcpu_e500->tlb1cfg); break; case SPRN_L1CSR0: kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr0); break; case SPRN_L1CSR1: kvmppc_set_gpr(vcpu, rt, vcpu_e500->l1csr1); break; case SPRN_HID0: kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid0); break; case SPRN_HID1: kvmppc_set_gpr(vcpu, rt, vcpu_e500->hid1); break; case SPRN_SVR: kvmppc_set_gpr(vcpu, rt, vcpu_e500->svr); break; case SPRN_MMUCSR0: kvmppc_set_gpr(vcpu, rt, 0); break; case SPRN_MMUCFG: kvmppc_set_gpr(vcpu, rt, mfspr(SPRN_MMUCFG)); break; /* extra exceptions */ case SPRN_IVOR32: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL]); break; case SPRN_IVOR33: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA]); break; case SPRN_IVOR34: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND]); break; case SPRN_IVOR35: kvmppc_set_gpr(vcpu, rt, vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR]); break; default: emulated = kvmppc_booke_emulate_mfspr(vcpu, sprn, rt); } return emulated; }
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); int ra = get_ra(inst); int rs = get_rs(inst); int rt = get_rt(inst); int sprn = get_sprn(inst); enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_TRAP: #ifdef CONFIG_64BIT case OP_31_XOP_TRAP_64: #endif #ifdef CONFIG_PPC_BOOK3S kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case OP_31_XOP_LWZX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STWX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_LHAX: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MFSPR: emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); break; case OP_31_XOP_STHX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MTSPR: emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); break; case OP_31_XOP_DCBST: case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case OP_LWZ: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. 
*/ case OP_LD: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); break; case OP_LWZU: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LBZ: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STW: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */ case OP_STD: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 8, 1); break; case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STB: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHZ: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHA: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STH: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); kvmppc_core_queue_program(vcpu, 0); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
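/*
 * Sketch, not from the original source: the decode above relies on the
 * get_op()/get_xop()/get_rt()/get_rs()/get_ra()/get_rb()/get_sprn()
 * accessors.  Assuming the standard PowerPC instruction word layout, they
 * are expected to behave roughly like the hypothetical helpers below (the
 * real definitions live in the KVM disassembly header).
 */
static inline unsigned int sketch_get_op(u32 inst)	/* primary opcode, bits 0-5 */
{
	return inst >> 26;
}

static inline unsigned int sketch_get_xop(u32 inst)	/* extended opcode, bits 21-30 */
{
	return (inst >> 1) & 0x3ff;
}

static inline unsigned int sketch_get_rt(u32 inst)	/* RT/RS field, bits 6-10 */
{
	return (inst >> 21) & 0x1f;
}

static inline unsigned int sketch_get_ra(u32 inst)	/* RA field, bits 11-15 */
{
	return (inst >> 16) & 0x1f;
}

static inline unsigned int sketch_get_rb(u32 inst)	/* RB field, bits 16-20 */
{
	return (inst >> 11) & 0x1f;
}

static inline unsigned int sketch_get_sprn(u32 inst)	/* SPR number, 5-bit halves swapped */
{
	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}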
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); u32 ea; int ra; int rb; int rs; int rt; int sprn; enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_LWZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_LHAX: rt = get_rt(inst); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_MFSPR: sprn = get_sprn(inst); rt = get_rt(inst); switch (sprn) { case SPRN_SRR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; case SPRN_SRR1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; case SPRN_PVR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; case SPRN_PIR: kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; case SPRN_MSSSR0: kvmppc_set_gpr(vcpu, rt, 0); break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; case SPRN_TBWU: kvmppc_set_gpr(vcpu, rt, get_tb()); break; case SPRN_SPRG0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; case SPRN_SPRG1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; case SPRN_SPRG2: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; case SPRN_SPRG3: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap.
*/ case SPRN_DEC: { u64 jd = get_tb() - vcpu->arch.dec_jiffies; kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); pr_debug("mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, kvmppc_get_gpr(vcpu, rt)); break; } default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_FAIL) { printk("mfspr: unknown spr %x\n", sprn); kvmppc_set_gpr(vcpu, rt, 0); } break; } break; case OP_31_XOP_STHX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_MTSPR: sprn = get_sprn(inst); rs = get_rs(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SRR1: vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; /* XXX We need to context-switch the timebase for * watchdog and FIT. */ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_MSSSR0: break; case SPRN_DEC: vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG1: vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG2: vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG3: vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); if (emulated == EMULATE_FAIL) printk("mtspr: unknown spr %x\n", sprn); break; } break; case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: /* Attempt core-specific emulation below.
*/ emulated = EMULATE_FAIL; } break; case OP_LWZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_LWZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LBZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STW: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STB: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LHZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LHA: rt = get_rt(inst); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); kvmppc_core_queue_program(vcpu, 0); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
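/*
 * Sketch, not from the original source, of the decrementer read-back math
 * used in the SPRN_DEC mfspr case above.  The assumption (hypothetical
 * naming below) is that vcpu->arch.dec_jiffies holds the timebase value
 * captured when DEC was last written, so the value handed back to the
 * guest is the programmed count minus the timebase ticks elapsed since
 * that write.
 */
static inline u32 sketch_dec_remaining(u32 dec_programmed, u64 tb_now, u64 tb_at_last_write)
{
	u64 elapsed = tb_now - tb_at_last_write;

	/* Wraps once the programmed interval has expired, mirroring the
	 * unsigned arithmetic in the emulation code above. */
	return dec_programmed - (u32)elapsed;
}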