static void kvm_patch_ins_mtmsr(u32 *inst, u32 rt)
{
	u32 *p;
	int distance_start;
	int distance_end;
	ulong next_inst;

	p = kvm_alloc(kvm_emulate_mtmsr_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_mtmsr_branch_offs];

	/* Make sure we only write valid b instructions */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_mtmsr, kvm_emulate_mtmsr_len * 4);
	p[kvm_emulate_mtmsr_branch_offs] |= distance_end & KVM_INST_B_MASK;

	/* Make clobbered registers work too */
	switch (get_rt(rt)) {
	case 30:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch2), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch2), KVM_RT_30);
		break;
	case 31:
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg1_offs],
				 magic_var(scratch1), KVM_RT_30);
		kvm_patch_ins_ll(&p[kvm_emulate_mtmsr_reg2_offs],
				 magic_var(scratch1), KVM_RT_30);
		break;
	default:
		p[kvm_emulate_mtmsr_reg1_offs] |= rt;
		p[kvm_emulate_mtmsr_reg2_offs] |= rt;
		break;
	}

	p[kvm_emulate_mtmsr_orig_ins_offs] = *inst;
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_mtmsr_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); int ra = get_ra(inst); int rs = get_rs(inst); int rt = get_rt(inst); int sprn = get_sprn(inst); enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_TRAP: #ifdef CONFIG_64BIT case OP_31_XOP_TRAP_64: #endif #ifdef CONFIG_PPC_BOOK3S kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case OP_31_XOP_LWZX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STWX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_LHAX: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MFSPR: emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt); break; case OP_31_XOP_STHX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MTSPR: emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs); break; case OP_31_XOP_DCBST: case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case OP_LWZ: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. 
*/ case OP_LD: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); break; case OP_LWZU: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LBZ: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STW: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */ case OP_STD: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 8, 1); break; case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STB: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHZ: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHA: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STH: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); kvmppc_core_queue_program(vcpu, 0); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); int ra = get_ra(inst); int rs = get_rs(inst); int rt = get_rt(inst); int sprn = get_sprn(inst); enum emulation_result emulated = EMULATE_DONE; int advance = 1; ulong spr_val = 0; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_TRAP: #ifdef CONFIG_64BIT case OP_31_XOP_TRAP_64: #endif #ifdef CONFIG_PPC_BOOK3S kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.shared->esr | ESR_PTR); #endif advance = 0; break; case OP_31_XOP_LWZX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_STWX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_LHAX: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MFSPR: switch (sprn) { case SPRN_SRR0: spr_val = vcpu->arch.shared->srr0; break; case SPRN_SRR1: spr_val = vcpu->arch.shared->srr1; break; case SPRN_PVR: spr_val = vcpu->arch.pvr; break; case SPRN_PIR: spr_val = vcpu->vcpu_id; break; case SPRN_MSSSR0: spr_val = 0; break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: spr_val = get_tb() >> 32; break; case SPRN_TBWU: spr_val = get_tb(); break; case SPRN_SPRG0: spr_val = vcpu->arch.shared->sprg0; break; case SPRN_SPRG1: spr_val = vcpu->arch.shared->sprg1; break; case SPRN_SPRG2: spr_val = vcpu->arch.shared->sprg2; break; case SPRN_SPRG3: spr_val = vcpu->arch.shared->sprg3; break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. 
*/ case SPRN_DEC: spr_val = kvmppc_get_dec(vcpu, get_tb()); break; default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val); if (unlikely(emulated == EMULATE_FAIL)) { printk(KERN_INFO "mfspr: unknown spr " "0x%x\n", sprn); } break; } kvmppc_set_gpr(vcpu, rt, spr_val); kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS); break; case OP_31_XOP_STHX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_31_XOP_MTSPR: spr_val = kvmppc_get_gpr(vcpu, rs); switch (sprn) { case SPRN_SRR0: vcpu->arch.shared->srr0 = spr_val; break; case SPRN_SRR1: vcpu->arch.shared->srr1 = spr_val; break; /* XXX We need to context-switch the timebase for * watchdog and FIT. */ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_MSSSR0: break; case SPRN_DEC: vcpu->arch.dec = spr_val; kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.shared->sprg0 = spr_val; break; case SPRN_SPRG1: vcpu->arch.shared->sprg1 = spr_val; break; case SPRN_SPRG2: vcpu->arch.shared->sprg2 = spr_val; break; case SPRN_SPRG3: vcpu->arch.shared->sprg3 = spr_val; break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, spr_val); if (emulated == EMULATE_FAIL) printk(KERN_INFO "mtspr: unknown spr " "0x%x\n", sprn); break; } kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS); break; case OP_31_XOP_DCBF: case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case OP_LWZ: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */ case OP_LD: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1); break; case OP_LWZU: emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LBZ: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STW: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. 
*/ case OP_STD: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 8, 1); break; case OP_STWU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STB: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHZ: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_LHA: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; case OP_STH: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed); break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); kvmppc_core_queue_program(vcpu, 0); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rs = get_rs(inst);
	int rt = get_rt(inst);

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFI:
			kvmppc_emul_rfi(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
			*advance = 0;
			break;

		case OP_19_XOP_RFCI:
			kvmppc_emul_rfci(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
			break;

		case OP_31_XOP_MTMSR:
			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;

		case OP_31_XOP_WRTEE:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		case OP_31_XOP_WRTEEI:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (inst & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		default:
			emulated = EMULATE_FAIL;
		}
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int dcrn;
	int ra;
	int rb;
	int rc;
	int rs;
	int rt;
	int ws;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_MFDCR:
			dcrn = get_dcrn(inst);
			rt = get_rt(inst);

			/* The guest may access CPR0 registers to determine the timebase
			 * frequency, and it must know the real host frequency because it
			 * can directly access the timebase registers.
			 *
			 * It would be possible to emulate those accesses in userspace,
			 * but userspace can really only figure out the end frequency.
			 * We could decompose that into the factors that compute it, but
			 * that's tricky math, and it's easier to just report the real
			 * CPR0 values.
			 */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
				break;
			case DCRN_CPR0_CONFIG_DATA:
				local_irq_disable();
				mtdcr(DCRN_CPR0_CONFIG_ADDR,
				      vcpu->arch.cpr0_cfgaddr);
				kvmppc_set_gpr(vcpu, rt,
					       mfdcr(DCRN_CPR0_CONFIG_DATA));
				local_irq_enable();
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = 0;
				run->dcr.is_write = 0;
				vcpu->arch.io_gpr = rt;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
				emulated = EMULATE_DO_DCR;
			}

			break;

		case XOP_MTDCR:
			dcrn = get_dcrn(inst);
			rs = get_rs(inst);

			/* emulate some access in kernel */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = kvmppc_get_gpr(vcpu, rs);
				run->dcr.is_write = 1;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
				emulated = EMULATE_DO_DCR;
			}

			break;

		case XOP_TLBWE:
			ra = get_ra(inst);
			rs = get_rs(inst);
			ws = get_ws(inst);
			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case XOP_TLBSX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);
			rc = get_rc(inst);
			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;

		case XOP_ICCCI:
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = vcpu->arch.last_inst; u32 ea; int ra; int rb; int rs; int rt; int sprn; enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); switch (get_op(inst)) { case OP_TRAP: vcpu->arch.esr |= ESR_PTR; kvmppc_core_queue_program(vcpu); advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_LWZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; case OP_31_XOP_STBX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; case OP_31_XOP_STBUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); vcpu->arch.gpr[rs] = ea; break; case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = ea; break; case OP_31_XOP_MFSPR: sprn = get_sprn(inst); rt = get_rt(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.gpr[rt] = vcpu->arch.srr0; break; case SPRN_SRR1: vcpu->arch.gpr[rt] = vcpu->arch.srr1; break; case SPRN_PVR: vcpu->arch.gpr[rt] = vcpu->arch.pvr; break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: vcpu->arch.gpr[rt] = mftbl(); break; case SPRN_TBWU: vcpu->arch.gpr[rt] = mftbu(); break; case SPRN_SPRG0: vcpu->arch.gpr[rt] = vcpu->arch.sprg0; break; case SPRN_SPRG1: vcpu->arch.gpr[rt] = vcpu->arch.sprg1; break; case SPRN_SPRG2: vcpu->arch.gpr[rt] = vcpu->arch.sprg2; break; case SPRN_SPRG3: vcpu->arch.gpr[rt] = vcpu->arch.sprg3; break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. */ default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_FAIL) { printk("mfspr: unknown spr %x\n", sprn); vcpu->arch.gpr[rt] = 0; } break; } break; case OP_31_XOP_STHX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; case OP_31_XOP_STHUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = vcpu->arch.gpr[rb]; if (ra) ea += vcpu->arch.gpr[ra]; emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); vcpu->arch.gpr[ra] = ea; break; case OP_31_XOP_MTSPR: sprn = get_sprn(inst); rs = get_rs(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.srr0 = vcpu->arch.gpr[rs]; break; case SPRN_SRR1: vcpu->arch.srr1 = vcpu->arch.gpr[rs]; break; /* XXX We need to context-switch the timebase for * watchdog and FIT. 
*/ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_DEC: vcpu->arch.dec = vcpu->arch.gpr[rs]; kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.sprg0 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG1: vcpu->arch.sprg1 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG2: vcpu->arch.sprg2 = vcpu->arch.gpr[rs]; break; case SPRN_SPRG3: vcpu->arch.sprg3 = vcpu->arch.gpr[rs]; break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); if (emulated == EMULATE_FAIL) printk("mtspr: unknown spr %x\n", sprn); break; } break; case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 0); break; case OP_31_XOP_LHBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 0); break; default: /* Attempt core-specific emulation below. */ emulated = EMULATE_FAIL; } break; case OP_LWZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_LWZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_LBZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STW: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); break; case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 4, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STB: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); break; case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 1, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_LHZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); break; case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs], 2, 1); vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed; break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); } } KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated, entryexit); if (advance) vcpu->arch.pc += 4; /* Advance past emulated instruction. */ return emulated; }
/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
/* XXX Should probably auto-generate instruction decoding for a particular core * from opcode tables in the future. */ int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu) { u32 inst = kvmppc_get_last_inst(vcpu); u32 ea; int ra; int rb; int rs; int rt; int sprn; enum emulation_result emulated = EMULATE_DONE; int advance = 1; /* this default type might be overwritten by subcategories */ kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS); pr_debug(KERN_INFO "Emulating opcode %d / %d\n", get_op(inst), get_xop(inst)); switch (get_op(inst)) { case OP_TRAP: #ifdef CONFIG_PPC_BOOK3S case OP_TRAP_64: kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP); #else kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR); #endif advance = 0; break; case 31: switch (get_xop(inst)) { case OP_31_XOP_LWZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_31_XOP_LBZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_31_XOP_LBZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_STWX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_31_XOP_STBX: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_31_XOP_STBUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, rs, ea); break; case OP_31_XOP_LHAX: rt = get_rt(inst); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_31_XOP_LHZUX: rt = get_rt(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_MFSPR: sprn = get_sprn(inst); rt = get_rt(inst); switch (sprn) { case SPRN_SRR0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break; case SPRN_SRR1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break; case SPRN_PVR: kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break; case SPRN_PIR: kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break; case SPRN_MSSSR0: kvmppc_set_gpr(vcpu, rt, 0); break; /* Note: mftb and TBRL/TBWL are user-accessible, so * the guest can always access the real TB anyways. * In fact, we probably will never see these traps. */ case SPRN_TBWL: kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break; case SPRN_TBWU: kvmppc_set_gpr(vcpu, rt, get_tb()); break; case SPRN_SPRG0: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break; case SPRN_SPRG1: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break; case SPRN_SPRG2: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break; case SPRN_SPRG3: kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break; /* Note: SPRG4-7 are user-readable, so we don't get * a trap. 
*/ case SPRN_DEC: { u64 jd = get_tb() - vcpu->arch.dec_jiffies; kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd); pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n", vcpu->arch.dec, jd, kvmppc_get_gpr(vcpu, rt)); break; } default: emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt); if (emulated == EMULATE_FAIL) { printk("mfspr: unknown spr %x\n", sprn); kvmppc_set_gpr(vcpu, rt, 0); } break; } break; case OP_31_XOP_STHX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_31_XOP_STHUX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); ea = kvmppc_get_gpr(vcpu, rb); if (ra) ea += kvmppc_get_gpr(vcpu, ra); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, ea); break; case OP_31_XOP_MTSPR: sprn = get_sprn(inst); rs = get_rs(inst); switch (sprn) { case SPRN_SRR0: vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SRR1: vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break; /* XXX We need to context-switch the timebase for * watchdog and FIT. */ case SPRN_TBWL: break; case SPRN_TBWU: break; case SPRN_MSSSR0: break; case SPRN_DEC: vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs); kvmppc_emulate_dec(vcpu); break; case SPRN_SPRG0: vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG1: vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG2: vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break; case SPRN_SPRG3: vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break; default: emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs); if (emulated == EMULATE_FAIL) printk("mtspr: unknown spr %x\n", sprn); break; } break; case OP_31_XOP_DCBI: /* Do nothing. The guest is performing dcbi because * hardware DMA is not snooped by the dcache, but * emulated DMA either goes through the dcache as * normal writes, or the host kernel has handled dcache * coherence. */ break; case OP_31_XOP_LWBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0); break; case OP_31_XOP_TLBSYNC: break; case OP_31_XOP_STWBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 0); break; case OP_31_XOP_LHBRX: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0); break; case OP_31_XOP_STHBRX: rs = get_rs(inst); ra = get_ra(inst); rb = get_rb(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 0); break; default: /* Attempt core-specific emulation below. 
*/ emulated = EMULATE_FAIL; } break; case OP_LWZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); break; case OP_LWZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LBZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); break; case OP_LBZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STW: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); break; case OP_STWU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 4, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STB: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); break; case OP_STBU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 1, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LHZ: rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); break; case OP_LHZU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_LHA: rt = get_rt(inst); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); break; case OP_LHAU: ra = get_ra(inst); rt = get_rt(inst); emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; case OP_STH: rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); break; case OP_STHU: ra = get_ra(inst); rs = get_rs(inst); emulated = kvmppc_handle_store(run, vcpu, kvmppc_get_gpr(vcpu, rs), 2, 1); kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed); break; default: emulated = EMULATE_FAIL; } if (emulated == EMULATE_FAIL) { emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance); if (emulated == EMULATE_AGAIN) { advance = 0; } else if (emulated == EMULATE_FAIL) { advance = 0; printk(KERN_ERR "Couldn't emulate instruction 0x%08x " "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst)); kvmppc_core_queue_program(vcpu, 0); } } trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated); /* Advance past emulated instruction. */ if (advance) kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4); return emulated; }
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. Based on PowerISA
		 * these are illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			run->exit_reason = KVM_EXIT_DEBUG;
			run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
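/*
 * For reference: the get_op()/get_xop()/get_rt()/get_ra()/get_rb() helpers
 * used throughout the decoders above pull the standard Power ISA D-/X-form
 * fields out of the raw 32-bit instruction word.  Below is a minimal
 * standalone sketch of that field extraction; it mirrors what the kernel
 * helpers do, but the demo_* names and the example program are illustrative
 * assumptions, not the actual asm/disassemble.h definitions.
 */
#include <stdint.h>
#include <stdio.h>

static inline unsigned int demo_get_op(uint32_t inst)  { return inst >> 26; }          /* primary opcode, bits 0-5 */
static inline unsigned int demo_get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; } /* X-form extended opcode   */
static inline unsigned int demo_get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; } /* RT/RS field, bits 6-10   */
static inline unsigned int demo_get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; } /* RA field, bits 11-15     */
static inline unsigned int demo_get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; } /* RB field, bits 16-20     */

int main(void)
{
	uint32_t inst = 0x7ca4182e;	/* lwzx r5,r4,r3: op 31, xop 23 */

	/* Prints: op=31 xop=23 rt=5 ra=4 rb=3 */
	printf("op=%u xop=%u rt=%u ra=%u rb=%u\n",
	       demo_get_op(inst), demo_get_xop(inst), demo_get_rt(inst),
	       demo_get_ra(inst), demo_get_rb(inst));
	return 0;
}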