int kvmppc_emulate_mmio(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	enum emulation_result er;
	int r;

	er = kvmppc_emulate_instruction(run, vcpu);
	switch (er) {
	case EMULATE_DONE:
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_GUEST_NV;
		break;
	case EMULATE_DO_MMIO:
		run->exit_reason = KVM_EXIT_MMIO;
		/* We must reload nonvolatiles because "update" load/store
		 * instructions modify register state. */
		/* Future optimization: only reload non-volatiles if they were
		 * actually modified. */
		r = RESUME_HOST_NV;
		break;
	case EMULATE_FAIL:
		/* XXX Deliver Program interrupt to guest. */
		printk(KERN_EMERG "%s: emulation failed (%08x)\n", __func__,
		       kvmppc_get_last_inst(vcpu));
		r = RESUME_HOST;
		break;
	default:
		WARN_ON(1);
		r = RESUME_GUEST;
	}

	return r;
}
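/*
 * For reference, the decode helpers used throughout (get_op(), get_rt(),
 * get_xop(), get_sprn(), ...) extract standard PowerPC instruction fields.
 * A minimal sketch, assuming the usual big-endian ISA field layout (primary
 * opcode in bits 0-5, RT/RS in 6-10, RA in 11-15, RB in 16-20, extended
 * opcode in 21-30); the kernel's real helpers live in asm/disassemble.h and
 * may differ in detail:
 */
static inline unsigned int ex_get_op(u32 inst)  { return inst >> 26; }
static inline unsigned int ex_get_xop(u32 inst) { return (inst >> 1) & 0x3ff; }
static inline unsigned int ex_get_rt(u32 inst)  { return (inst >> 21) & 0x1f; }
static inline unsigned int ex_get_rs(u32 inst)  { return (inst >> 21) & 0x1f; }
static inline unsigned int ex_get_ra(u32 inst)  { return (inst >> 16) & 0x1f; }
static inline unsigned int ex_get_rb(u32 inst)  { return (inst >> 11) & 0x1f; }

/* mfspr/mtspr encode the SPR number with its two 5-bit halves swapped. */
static inline unsigned int ex_get_sprn(u32 inst)
{
	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}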
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux,
	 * ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux,
	 * stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
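/*
 * The "update" forms handled above (lwzu, lbzux, sthux, ...) also write the
 * computed effective address back into RA, which is why each of those cases
 * follows the access with kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed).
 * A minimal sketch of the architected semantics being mirrored, as a
 * hypothetical scalar helper (illustration only; architecturally RA must be
 * nonzero and distinct from RT for lwzu):
 */
static inline u32 ex_lwzu(u32 *gpr, int rt, int ra, s16 d, const u8 *mem)
{
	u32 ea = gpr[ra] + d;			/* EA = (RA) + D */
	u32 val;

	memcpy(&val, mem + ea, sizeof(val));	/* RT = MEM(EA, 4) */
	gpr[rt] = val;
	gpr[ra] = ea;				/* update form: RA = EA */
	return val;
}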
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;

			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL:
				break;
			case SPRN_TBWU:
				break;

			case SPRN_MSSSR0:
				break;

			case SPRN_DEC:
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux,
	 * ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux,
	 * stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
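/*
 * The preceding variant inlines the SPR read/write switches that the earlier
 * variant delegates to kvmppc_emulate_mfspr()/kvmppc_emulate_mtspr(). A
 * hedged sketch of how the inline mfspr switch factors into a helper of that
 * shape (assumed skeleton, not the kernel's actual implementation):
 */
static enum emulation_result ex_emulate_mfspr(struct kvm_vcpu *vcpu,
					      int sprn, int rt)
{
	enum emulation_result emulated = EMULATE_DONE;
	ulong spr_val = 0;

	switch (sprn) {
	case SPRN_SRR0:
		spr_val = vcpu->arch.shared->srr0;
		break;
	case SPRN_PVR:
		spr_val = vcpu->arch.pvr;
		break;
	/* ... remaining SPRs exactly as in the inline switch above ... */
	default:
		emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, &spr_val);
		break;
	}

	/* Mirror the inline code: the (possibly zero) value always lands
	 * in the target GPR. */
	kvmppc_set_gpr(vcpu, rt, spr_val);
	return emulated;
}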
/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux,
	 * ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux,
	 * stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
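/*
 * The trailing 0/1 flag on kvmppc_handle_load()/kvmppc_handle_store()
 * distinguishes the byte-reversed accesses (lwbrx, lhbrx, stwbrx, sthbrx,
 * flag 0) from normally ordered ones (flag 1). The reversal itself is just a
 * byte swap of the operand; illustrative helpers (the kernel would normally
 * use its own swab16()/swab32()):
 */
static inline u32 ex_byterev32(u32 v)
{
	return ((v & 0x000000ffu) << 24) |
	       ((v & 0x0000ff00u) <<  8) |
	       ((v & 0x00ff0000u) >>  8) |
	       ((v & 0xff000000u) >> 24);
}

static inline u16 ex_byterev16(u16 v)
{
	return (u16)((v << 8) | (v >> 8));
}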
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	enum emulation_result emulated = EMULATE_DONE;
	int ax_rd = inst_get_field(inst, 6, 10);
	int ax_ra = inst_get_field(inst, 11, 15);
	int ax_rb = inst_get_field(inst, 16, 20);
	int ax_rc = inst_get_field(inst, 21, 25);
	short full_d = inst_get_field(inst, 16, 31);
	u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
	u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
	u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
	u64 *fpr_c = &vcpu->arch.fpr[ax_rc];
	bool rcomp = (inst & 1) ? true : false;
	u32 cr = kvmppc_get_cr(vcpu);
#ifdef DEBUG
	int i;
#endif

	if (!kvmppc_inst_is_paired_single(vcpu, inst))
		return EMULATE_FAIL;

	if (!(vcpu->arch.shared->msr & MSR_FP)) {
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
		return EMULATE_AGAIN;
	}

	kvmppc_giveup_ext(vcpu, MSR_FP);
	preempt_disable();
	enable_kernel_fp();
	/* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
		u32 f;
		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx QPR[%d] = 0x%x\n",
			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
	}
#endif

	switch (get_op(inst)) {
	case OP_PSQ_L:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_LU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_PSQ_ST:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_STU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 4:
		/* X form */
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd,
							   addr, w, i);
			break;
		}
		case OP_4X_PS_CMPO0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd,
							   addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_4X_PS_NEG:
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
			break;
		case OP_4X_PS_CMPU1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_MR:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_CMPO1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_NABS:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] |= 0x80000000;
			break;
		case OP_4X_PS_ABS:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
			break;
		case OP_4X_PS_MERGE00:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE01:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_MERGE10:
			WARN_ON(rcomp);
			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &vcpu->arch.fpr[ax_rd]);
			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE11:
			WARN_ON(rcomp);
			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &vcpu->arch.fpr[ax_rd]);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		}
		/* XW form */
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd,
							    addr, w, i);
			break;
		}
		case OP_4XW_PSQ_STUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd,
							    addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		}
		/* A form */
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH,
					fps_fadds);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
			break;
		case OP_4A_PS_SUM0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW,
					fps_fadds);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
			break;
		case OP_4A_PS_MULS0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
			break;
		case OP_4A_PS_MULS1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
			break;
		case OP_4A_PS_MADDS0:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_HIGH,
					fps_fmadds);
			break;
		case OP_4A_PS_MADDS1:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_LOW,
					fps_fmadds);
			break;
		case OP_4A_PS_DIV:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
			break;
		case OP_4A_PS_SUB:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
			break;
		case OP_4A_PS_ADD:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
			break;
		case OP_4A_PS_SEL:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE,
					fps_fsel);
			break;
		case OP_4A_PS_RES:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_fres);
			break;
		case OP_4A_PS_MUL:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
			break;
		case OP_4A_PS_RSQRTE:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_frsqrte);
			break;
		case OP_4A_PS_MSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE,
					fps_fmsubs);
			break;
		case OP_4A_PS_MADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE,
					fps_fmadds);
			break;
		case OP_4A_PS_NMSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE,
					fps_fnmsubs);
			break;
		case OP_4A_PS_NMADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE,
					fps_fnmadds);
			break;
		}
		break;

	/* Real FPU operations */
	case OP_LFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);
		break;
	}
	case OP_LFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_LFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);
		break;
	}
	case OP_LFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);
		break;
	}
	case OP_STFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);
		break;
	}
	case OP_STFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_LFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_LFDX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_LFDUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFSX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_STFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_STFUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFIWX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr,
							    FPU_LS_SINGLE_LOW);
			break;
		}
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
			fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FSUBS:
			fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FDIVS:
			fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRES:
			fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRSQRTES:
			fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
			fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMSUBS:
			fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				   fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMADDS:
			fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				   fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMSUBS:
			fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				    fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMADDS:
			fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				    fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		break;
	case 63:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_63_MTFSB0:
		case OP_63_MTFSB1:
		case OP_63_MCRFS:
		case OP_63_MTFSFI:
			/* XXX need to implement */
			break;
		case OP_63_MFFS:
			/* XXX missing CR */
			*fpr_d = vcpu->arch.fpscr;
			break;
		case OP_63_MTFSF:
			/* XXX missing fm bits */
			/* XXX missing CR */
			vcpu->arch.fpscr = *fpr_b;
			break;
		case OP_63_FCMPU:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			/* copy the compare result (CR0 field of tmp_cr) into
			 * the CR field selected by crfD */
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FCMPO:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			/* copy the compare result (CR0 field of tmp_cr) into
			 * the CR field selected by crfD */
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FNEG:
			fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FMR:
			*fpr_d = *fpr_b;
			break;
		case OP_63_FABS:
			fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCPSGN:
			fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FDIV:
			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FADD:
			fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FSUB:
			fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FCTIW:
			fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCTIWZ:
			fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FRSP:
			fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_63_FRSQRTE:
		{
			double one = 1.0;

			/* fD = sqrt(fB) */
			fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			/* fD = 1.0f / fD */
			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64 *)&one,
				 fpr_d);
			break;
		}
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
			fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			break;
		case OP_63_FSEL:
			fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				 fpr_b);
			break;
		case OP_63_FMSUB:
			fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				  fpr_b);
			break;
		case OP_63_FMADD:
			fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				  fpr_b);
			break;
		case OP_63_FNMSUB:
			fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				   fpr_b);
			break;
		case OP_63_FNMADD:
			fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c,
				   fpr_b);
			break;
		}
		break;
	}

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
		u32 f;
		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
	}
#endif

	if (rcomp)
		kvmppc_set_cr(vcpu, cr);

	preempt_enable();

	return emulated;
}
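/*
 * Register model used above: for each paired-single register, ps0 lives in
 * vcpu->arch.fpr[n] as a full double, while ps1 is kept in vcpu->arch.qpr[n]
 * as raw 32-bit single-precision bits; kvm_cvt_df()/kvm_cvt_fd() convert
 * between the two representations. Under that model the four merge ops above
 * reduce to lane selection (illustrative summary):
 *
 *	ps_merge00 rd,ra,rb:	rd = (ps0(ra), ps0(rb))
 *	ps_merge01 rd,ra,rb:	rd = (ps0(ra), ps1(rb))
 *	ps_merge10 rd,ra,rb:	rd = (ps1(ra), ps0(rb))
 *	ps_merge11 rd,ra,rb:	rd = (ps1(ra), ps1(rb))
 *
 * Likewise ps_neg/ps_abs/ps_nabs are pure sign-bit manipulations: XOR, AND
 * NOT, or OR with 0x8000000000000000 on the ps0 double and 0x80000000 on the
 * ps1 single bits.
 */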
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       1, 1);
			/* update form writes the EA back into ra, not rs */
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_LHAX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0);
				break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1);
				break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr);
				break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id);
				break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0);
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32);
				break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb());
				break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0);
				break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1);
				break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2);
				break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3);
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;

				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 kvmppc_get_gpr(vcpu, rt));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     rt);
				if (emulated == EMULATE_FAIL) {
					printk(KERN_INFO
					       "mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs);
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL:
				break;
			case SPRN_TBWU:
				break;

			case SPRN_MSSSR0:
				break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs);
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     rs);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO
					       "mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHA:
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
					       2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
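/*
 * Both mfDEC paths model the decrementer in software: vcpu->arch.dec holds
 * the value the guest last wrote and vcpu->arch.dec_jiffies the timebase at
 * that moment, so the current reading is dec minus the elapsed timebase
 * ticks. A sketch of the kvmppc_get_dec() helper used by the later variant
 * under that assumption (illustration, not the kernel's exact code):
 */
static inline u32 ex_get_dec(struct kvm_vcpu *vcpu, u64 tb)
{
	u64 jd = tb - vcpu->arch.dec_jiffies;	/* timebase ticks elapsed */

	return vcpu->arch.dec - jd;
}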
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. Based on PowerISA
		 * these are illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			run->exit_reason = KVM_EXIT_DEBUG;
			run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
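/*
 * The KVMPPC_INST_SW_BREAKPOINT branch above hands the event to userspace as
 * a KVM_EXIT_DEBUG exit with the trapping PC in run->debug.arch.address. A
 * hedged sketch of the matching userspace dispatch; ex_report_breakpoint()
 * and ex_handle_mmio() are hypothetical handlers, illustration only:
 */
static int ex_handle_exit(struct kvm_run *run)
{
	switch (run->exit_reason) {
	case KVM_EXIT_DEBUG:
		/* e.g. report the breakpoint to an attached debugger */
		return ex_report_breakpoint(run->debug.arch.address);
	case KVM_EXIT_MMIO:
		return ex_handle_mmio(run);
	default:
		return -1;
	}
}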