int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int dcrn = get_dcrn(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rc = get_rc(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int ws = get_ws(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case XOP_MFDCR:
			emulated = emulate_mfdcr(vcpu, rt, dcrn);
			break;

		case XOP_MFDCRX:
			emulated = emulate_mfdcr(vcpu, rt,
					kvmppc_get_gpr(vcpu, ra));
			break;

		case XOP_MTDCR:
			emulated = emulate_mtdcr(vcpu, rs, dcrn);
			break;

		case XOP_MTDCRX:
			emulated = emulate_mtdcr(vcpu, rs,
					kvmppc_get_gpr(vcpu, ra));
			break;

		case XOP_TLBWE:
			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case XOP_TLBSX:
			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;

		case XOP_ICCCI:
			break;

		default:
			emulated = EMULATE_FAIL;
		}
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
char *un_instr_disas(instruction instr, char *op_name)
{
	struct Operand dest = get_rd(instr);
	struct Operand source = get_rs(instr);
	char *disas;
	char *dest_disas, *source_disas;

	disas = (char *)malloc(LEN * sizeof(char));
	dest_disas = get_opw_disas(&dest);
	source_disas = get_opw_disas(&source);
	/* Bound the write to the LEN-byte buffer; plain sprintf could overflow. */
	snprintf(disas, LEN, "%s %s, %s", op_name, source_disas, dest_disas);
	free(dest_disas);
	free(source_disas);
	return disas;
}
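/* A minimal, hypothetical usage sketch of un_instr_disas() above: the buffer
 * it returns is heap-allocated, so ownership passes to the caller, which must
 * free() it. The "INC" mnemonic and this helper are illustrative assumptions,
 * not part of the original emulator (requires <stdio.h> and <stdlib.h>). */
void print_unary(instruction instr)
{
	char *text = un_instr_disas(instr, "INC");

	printf("%s\n", text);
	free(text);	/* caller owns the malloc'd string */
}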
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like
	   ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like
	   stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;

			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL:
				break;
			case SPRN_TBWU:
				break;

			case SPRN_MSSSR0:
				break;

			case SPRN_DEC:
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like
	   ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like
	   stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			    unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rs = get_rs(inst);
	int rt = get_rt(inst);

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFI:
			kvmppc_emul_rfi(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
			*advance = 0;
			break;

		case OP_19_XOP_RFCI:
			kvmppc_emul_rfci(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
			break;

		case OP_31_XOP_MTMSR:
			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;

		case OP_31_XOP_WRTEE:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		case OP_31_XOP_WRTEEI:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (inst & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		default:
			emulated = EMULATE_FAIL;
		}
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
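/* Side note on the WRTEEI case above, as a standalone sketch: the E field of
 * wrteei occupies the same bit position as the MSR_EE mask (1 << 15), which is
 * why masking the raw instruction word with MSR_EE yields the new
 * external-interrupt enable bit directly. The encoding below is an assumption
 * based on the PowerPC ISA (primary opcode 31, extended opcode 163), not
 * something taken from this file. */
#include <assert.h>
#include <stdint.h>

#define TEST_MSR_EE 0x8000u	/* mirrors the kernel's MSR_EE mask */

int main(void)
{
	/* Assumed "wrteei 1" encoding: op 31, E bit at position 15, xop 163. */
	uint32_t wrteei_1 = (31u << 26) | (1u << 15) | (163u << 1);

	assert((wrteei_1 & TEST_MSR_EE) == TEST_MSR_EE);
	return 0;
}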
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
			   unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int dcrn;
	int ra;
	int rb;
	int rc;
	int rs;
	int rt;
	int ws;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case XOP_MFDCR:
			dcrn = get_dcrn(inst);
			rt = get_rt(inst);

			/* The guest may access CPR0 registers to determine the timebase
			 * frequency, and it must know the real host frequency because it
			 * can directly access the timebase registers.
			 *
			 * It would be possible to emulate those accesses in userspace,
			 * but userspace can really only figure out the end frequency.
			 * We could decompose that into the factors that compute it, but
			 * that's tricky math, and it's easier to just report the real
			 * CPR0 values.
			 */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				kvmppc_set_gpr(vcpu, rt,
					       vcpu->arch.cpr0_cfgaddr);
				break;
			case DCRN_CPR0_CONFIG_DATA:
				local_irq_disable();
				mtdcr(DCRN_CPR0_CONFIG_ADDR,
					  vcpu->arch.cpr0_cfgaddr);
				kvmppc_set_gpr(vcpu, rt,
					       mfdcr(DCRN_CPR0_CONFIG_DATA));
				local_irq_enable();
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = 0;
				run->dcr.is_write = 0;
				vcpu->arch.io_gpr = rt;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
				emulated = EMULATE_DO_DCR;
			}
			break;

		case XOP_MTDCR:
			dcrn = get_dcrn(inst);
			rs = get_rs(inst);

			/* emulate some access in kernel */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = kvmppc_get_gpr(vcpu, rs);
				run->dcr.is_write = 1;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
				emulated = EMULATE_DO_DCR;
			}
			break;

		case XOP_TLBWE:
			ra = get_ra(inst);
			rs = get_rs(inst);
			ws = get_ws(inst);
			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case XOP_TLBSX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);
			rc = get_rc(inst);
			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;

		case XOP_ICCCI:
			break;

		default:
			emulated = EMULATE_FAIL;
		}
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
void i_fg_build (void)
{
	unsigned i;
	i_puint32 cp = i_buf;	/* Start of code to analyze */
	i_puint32 lp = i_lim;	/* End of code to analyze */
	i_uint32 op;		/* Current opcode */
	i_bb_t b;		/* Current basic block */

	num_bb = 0;
	b = bb_alloc();
	b->h = cp;

	if (i_ralloctype == RA_EZ) {
		i_fg_root = i_fg_tail = b;
		b->t = lp - i_isize;
		return;
	}

	i_calls_cur = i_calls_lim = 0;
	NEW0(lbl2bb, i_lab_cur);
	NEW(fwdrefs, i_nbb);
	fwdref_cur = 0;
	i_fg_root = b;

	do {
		assert(b && !b->init);
		op = get_op(cp);
		switch (i_op2class[op]) {
		case I_BOP: case I_BOPF:
		case I_MOPR: case I_MOPRF:
			assert(!isimmed(op));
			markuse(b, get_rs(cp));
			markuse(b, get_rs2(cp));
			markdef(b, get_rd(cp));
			break;
		case I_BOPI:
		case I_MOPRI: case I_MOPRIF:
			assert(isimmed(op));
			markuse(b, get_rs(cp));
			markdef(b, get_rd(cp));
			break;
		case I_MOPW: case I_MOPWF:
			assert(!isimmed(op));
			markuse(b, get_rd(cp));
			markuse(b, get_rs(cp));
			markuse(b, get_rs2(cp));
			break;
		case I_MOPWI: case I_MOPWIF:
			assert(isimmed(op));
			markuse(b, get_rd(cp));
			markuse(b, get_rs(cp));
			break;
		case I_UOP: case I_UOPF:
			assert(!isimmed(op));
			markuse(b, get_rs(cp));
			markdef(b, get_rd(cp));
			break;
		case I_UOPI:
			assert(isimmed(op));
			markdef(b, get_rd(cp));
			break;
		case I_SET: case I_SETF:
			markdef(b, get_rd(cp));
			break;
		case I_LEA: case I_LEAF:
			SCLASS(get_rs(cp)) = STACK;
			markuse(b, get_rs(cp));
			markdef(b, get_rd(cp));
			break;
		case I_RET: case I_RETF:
			if (op != i_op_retv)
				markuse(b, get_rd(cp));
			/* Fall through */
		case I_RETI:
			b = bb_finalize(b, cp, lp, i_isize, false);
			break;
		case I_BR: case I_BRF:
			markuse(b, get_rs2(cp));
			/* Fall through */
		case I_BRI:
			markuse(b, get_rs(cp));
			bb_linklbl(b, get_rd(cp));
			b = bb_finalize(b, cp, lp, i_isize, true);
			break;
		case I_CALL: case I_CALLF:
			markuse(b, get_rs(cp));
			/* Fall through */
		case I_CALLI: case I_CALLIF:
			if (op != i_op_callv && op != i_op_callvi)
				markdef(b, get_rd(cp));
			markcall(cp);
			b = bb_finalize(b, cp, lp, 2*i_isize, true);
			cp += i_isize;	/* Calls are 2x as long as other insns */
			break;
		case I_ARG: case I_ARGF:
			markuse(b, get_rd(cp));
			break;
		case I_JMP:
			assert(get_rd(cp) < num_i);
			markuse(b, get_rd(cp));
			b = bb_finalize(b, cp, lp, i_isize, false);
			break;
		case I_JMPI:
			bb_linklbl(b, get_imm(cp));
			b = bb_finalize(b, cp, lp, i_isize, false);
			break;
		case I_MISC:
			switch (op) {
			case i_op_lbl:
				if (cp > b->h)	/* If not at head of current block ... */
					/* ... make this the head of a new block */
					b = bb_finalize(b, cp-i_isize, lp,
							i_isize, true);
				lbl2bb[get_rd(cp)] = b;
				break;
			default:	/* refmul, refdiv, self, nop */
				break;
			}
			break;
		default:
			assert(0);
		}
	} while ((cp += i_isize) < lp);

	i_fg_tail = b;

	for (i = 0; i < fwdref_cur; i++)
		bb_linkbb(fwdrefs[i].src, lbl2bb[fwdrefs[i].dst]);

#ifndef NDEBUG
	for (i = 0, b = i_fg_root; b; b = b->lnext)
		i++;
	assert(i == num_bb);
#endif
}
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = vcpu->arch.last_inst;
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	switch (get_op(inst)) {
	case OP_TRAP:
		vcpu->arch.esr |= ESR_PTR;
		kvmppc_core_queue_program(vcpu);
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       1, 1);
			/* Update forms write the EA back to rA, not rS. */
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.gpr[rt] = vcpu->arch.srr0;
				break;
			case SPRN_SRR1:
				vcpu->arch.gpr[rt] = vcpu->arch.srr1;
				break;
			case SPRN_PVR:
				vcpu->arch.gpr[rt] = vcpu->arch.pvr;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				vcpu->arch.gpr[rt] = mftbl();
				break;
			case SPRN_TBWU:
				vcpu->arch.gpr[rt] = mftbu();
				break;

			case SPRN_SPRG0:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg0;
				break;
			case SPRN_SPRG1:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg1;
				break;
			case SPRN_SPRG2:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg2;
				break;
			case SPRN_SPRG3:
				vcpu->arch.gpr[rt] = vcpu->arch.sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					vcpu->arch.gpr[rt] = 0;
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = vcpu->arch.gpr[rb];
			if (ra)
				ea += vcpu->arch.gpr[ra];

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 1);
			vcpu->arch.gpr[ra] = ea;
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = vcpu->arch.gpr[rs];
				break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = vcpu->arch.gpr[rs];
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL:
				break;
			case SPRN_TBWU:
				break;

			case SPRN_DEC:
				vcpu->arch.dec = vcpu->arch.gpr[rs];
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = vcpu->arch.gpr[rs];
				break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = vcpu->arch.gpr[rs];
				break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = vcpu->arch.gpr[rs];
				break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = vcpu->arch.gpr[rs];
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       vcpu->arch.gpr[rs],
						       2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       4, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       1, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu, vcpu->arch.gpr[rs],
					       2, 1);
		vcpu->arch.gpr[ra] = vcpu->arch.paddr_accessed;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
		}
	}

	KVMTRACE_3D(PPC_INSTR, vcpu, inst, (int)vcpu->arch.pc, emulated,
		    entryexit);

	if (advance)
		vcpu->arch.pc += 4; /* Advance past emulated instruction. */

	return emulated;
}
/* i_ig_build: collect live variable information from the flow graph,
   and build up the interference graph */
void i_ig_build (void)
{
	bvt iv = 0, fv = 0;	/* Temp bit vector */
	i_bb_t b;		/* Temp bblock */
	i_puint32 cp;		/* Code pointer */
	unsigned int op;	/* Current opcode */
	unsigned int nii, nfi;	/* Marks new int/float liveness info */
	i_local_t j;		/* Temporary local for use in macros */

	if (num_i)	/* Create new integer IG */
		iig = i_igcreate(num_i);
	if (num_f)	/* Create new float IG */
		fig = i_igcreate(num_f);

	b = i_fg_root;
	while (b) {
		iv = b->ilv_out;	/* We will modify b->lv_[f]out, since we no */
		fv = b->flv_out;	/* longer need them */
		assert(b->t >= b->h);
		for (cp = b->t; cp >= b->h; cp -= i_isize) {
			op = get_op(cp);
			nii = nfi = 0;
			switch (i_op2class[op]) {
			case I_MOPW: case I_MOPWF:
				gen(get_rd(cp));
				gen(get_rs(cp));
				gen(get_rs2(cp));
				updateig();
				break;
			case I_MOPWI: case I_MOPWIF:
				gen(get_rd(cp));
				gen(get_rs(cp));
				updateig();
				break;
			case I_MOPR: case I_BOP: case I_MOPRF: case I_BOPF:
			case I_MOPRI: case I_BOPI: case I_MOPRIF:
				/* Subtract def */
				if (!kill(get_rd(cp))) {
					mk_nop(cp);
					break;
				}
				/* Fall through */
			case I_BR: case I_BRI: case I_BRF:
				gen(get_rs(cp));
				if (!isimmed(op))
					gen(get_rs2(cp));
				updateig();
				break;
			case I_UOP: case I_UOPI: case I_UOPF:
			case I_LEA: case I_LEAF:
				/* Subtract def */
				if (!kill(get_rd(cp))) {
					mk_nop(cp);
					break;
				}
				/* Add use */
				if (!isimmed(op))
					gen(get_rs(cp));
				updateig();
				break;
			case I_SET: case I_SETF:
				/* Subtract def */
				if (!kill(get_rd(cp)))
					mk_nop(cp);
				break;
			case I_ARG: case I_ARGF:
			case I_RET: case I_RETI: case I_RETF:
			case I_JMP:
				/* Add use */
				if (op != i_op_retv && !isimmed(op)) {
					gen(get_rd(cp));
					updateig();
				}
				break;
			case I_CALL: case I_CALLF:
			case I_CALLI: case I_CALLIF:
				/* Subtract def */
				if (op != i_op_callv && op != i_op_callvi)
					kill(get_rd(cp));
				/* Handle caller-saved registers */
				if (num_f)
					fcallsav(cp, bv_cp(fv));
				if (num_i)
					icallsav(cp, bv_cp(iv));
				/* Add use of callee if not immediate */
				if (!isimmed(op)) {
					gen(get_rs(cp));
					updateig();
				}
				break;
			case I_MISC: case I_JMPI:
				continue;
			default:
				assert(0);
			}
		}
		b = b->lnext;
	}
}
/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like
	   ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like
	   stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
int handle_callback(int i, instruction instr)
{
	int addr = 0, addrs;	/* addr is initialized: the CTR case reads it
				   without assigning it first */
	int inc = 0;
	struct Operand dest = get_rd(instr);
	struct Operand source;
	short disp;

	switch (table[i].type) {
	case SA:
		if (instr.sa_instr.bw == 0) {
			addr = get_opb(&dest);
			inc = 1;
		} else {
			addr = get_opw(&dest);
			inc = 2;
		}
		table[i].callback(addr, 0);
		return 0;
	case DA:
		source = get_rs(instr);
		if (instr.da_instr.bw == 0) {
			addr = get_opb(&dest);
			addrs = get_opb(&source);
			inc = 1;
		} else {
			addr = get_opw(&dest);
			addrs = get_opw(&source);
			inc = 2;
		}
		table[i].callback(addr, addrs);
		addr_inc(&source, inc);
		break;
	case UN:
		source = get_rs(instr);
		addr = get_opw(&dest);
		addrs = get_opw(&source);
		inc = 2;
		table[i].callback(addr, addrs);
		addr_inc(&source, inc);
		break;
	case BR:
		disp = MAXBYTE & instr.instr;
		if (disp & SIGN) {	/* sign-extend the byte displacement */
			disp = disp | HBYTE;
		}
		if ((table[i].callback(0, 0)) != 0) {
			memory.R[7] += 2 * disp;
		}
		return 0;
	case CTR_INT:
		addr = get_opw(&dest);
		/* Fall through */
	case CTR:
		return table[i].callback(addr, 0);
	default:
		return 0;
	}
	addr_inc(&dest, inc);
	return 0;
}
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	/* pr_debug already supplies a level; KERN_INFO does not belong here. */
	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 1, 1);
			/* Update forms write the EA back to rA, not rS. */
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_LHAX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0);
				break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1);
				break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr);
				break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id);
				break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0);
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32);
				break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb());
				break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0);
				break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1);
				break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2);
				break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3);
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 kvmppc_get_gpr(vcpu, rt));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs);
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL:
				break;
			case SPRN_TBWU:
				break;

			case SPRN_MSSSR0:
				break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs);
				break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs);
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
					kvmppc_get_gpr(vcpu, rs), 2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHA:
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
				kvmppc_get_gpr(vcpu, rs), 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
void i_li_full (void)
{
	bvt iskip = 0, fskip = 0; /* Variables live at both start & end of bb */
	bvt ilv = 0, flv = 0;	/* Live variables at each step */
	i_bb_t b;		/* Temp bblock */
	i_puint32 cp;		/* Code pointer */
	i_cnt_t pos = max_cnt;	/* Absolute layout-order position */
	unsigned int op;	/* Current opcode */

	/* Allocate live ranges */
	if (num_i)
		NEW0(i_ilrs, num_i);
	if (num_f)
		NEW0(i_flrs, num_f);
	i_ilr_cur = num_i - 1;
	i_flr_cur = num_f - 1;

	/* Now calculate the live ranges */
	for (b = i_fg_tail; b; pos--, b = b->lprev) {
		tpos = pos;
		hpos = pos - (b->t - b->h)/i_isize;

		/* Compute skip set */
		if (num_i) {
			iskip = bv_rinter(b->ilv_out, b->ilv_in);
			ilv = bv_rdiff(b->ilv_out, iskip);
			bv_eachbit(iskip, lriskip, b);
		}
		if (num_f) {
			fskip = bv_rinter(b->flv_out, b->flv_in);
			flv = bv_rdiff(b->flv_out, fskip);
			bv_eachbit(fskip, lrfskip, b);
		}

		assert(b && b->t >= b->h);
		for (cp = b->t; cp >= b->h; pos--, cp -= i_isize) {
			op = get_op(cp);
			switch (i_op2class[op]) {
			case I_MOPW: case I_MOPWF:
				USE(get_rs2(cp));
				/* Fall through */
			case I_MOPWI: case I_MOPWIF:
				USE(get_rd(cp));
				USE(get_rs(cp));
				break;
			case I_MOPR: case I_BOP: case I_MOPRF: case I_BOPF:
			case I_MOPRI: case I_BOPI: case I_MOPRIF:
				DEF(get_rd(cp));
				/* Fall through */
			case I_BR: case I_BRI: case I_BRF:
				USE(get_rs(cp));
				if (!isimmed(op))
					USE(get_rs2(cp));
				break;
			case I_UOP: case I_UOPI: case I_UOPF:
				DEF(get_rd(cp));
				if (!isimmed(op))
					USE(get_rs(cp));
				break;
			case I_SET: case I_SETF:
				DEF(get_rd(cp));
				break;
			case I_LEA: case I_LEAF:
				DEF(get_rd(cp));
				USE(get_rs(cp));
				break;
			case I_ARG: case I_ARGF: case I_RETF: case I_JMP:
				USE(get_rd(cp));
				break;
			case I_RET:
				if (op != i_op_retv)
					USE(get_rd(cp));
				break;
			case I_CALL: case I_CALLF:
			case I_CALLI: case I_CALLIF:
				if (op != i_op_callv && op != i_op_callvi)
					DEFKP(get_rd(cp));
				/* Add use of callee if not immediate; it is in
				   an int register, so treat it like an int */
				if (!isimmed(op))
					USE(get_rs(cp));
				/* Set info for caller-saved registers */
				if (num_i)
					icallsav(cp, bv_runion(ilv, iskip));
				if (num_f)
					fcallsav(cp, bv_runion(flv, fskip));
				break;
			case I_MISC: case I_JMPI: case I_RETI:
				continue;
			default:
				assert(0);
			}
		}
		assert(pos+1 == hpos);
		if (num_i)
			bv_eachbit(ilv, lriuseiter, (void *)hpos);
		if (num_f)
			bv_eachbit(flv, lrfuseiter, (void *)hpos);
	}
	DEBUG(i_li_unparse());
}
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. Based on PowerISA
		 * these are illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			run->exit_reason = KVM_EXIT_DEBUG;
			run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst),
			       get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}