/*
 * Replace a trapping wrtee/wrteei instruction in the guest kernel text
 * with a branch into a freshly allocated copy of the kvm_emulate_wrtee
 * code template, so the MSR[EE] update runs without trapping.
 *
 * @inst:    address of the guest instruction being patched
 * @rt:      the rt register field of the original instruction
 * @imm_one: nonzero for the wrteei-1 form (immediate EE=1); zero for the
 *           register form (wrtee rt)
 *
 * On any failure the original instruction is left untouched; a branch
 * out of range additionally sets kvm_patching_worked = false.
 */
static void kvm_patch_ins_wrtee(u32 *inst, u32 rt, int imm_one)
{
	u32 *p;
	int distance_start;	/* branch displacement: call site -> template copy */
	int distance_end;	/* branch displacement: template copy -> back to caller */
	ulong next_inst;

	/* Carve out space for a private copy of the emulation template. */
	p = kvm_alloc(kvm_emulate_wrtee_len * 4);
	if (!p)
		return;

	/* Find out where we are and put everything there */
	distance_start = (ulong)p - (ulong)inst;
	next_inst = ((ulong)inst + 4);
	distance_end = next_inst - (ulong)&p[kvm_emulate_wrtee_branch_offs];

	/*
	 * Make sure we only write valid b instructions.
	 * NOTE(review): only distance_start is range-checked here;
	 * distance_end is assumed to fit — presumably because kvm_alloc
	 * hands out memory near the templates. Confirm against the
	 * allocator. Also, on this failure path the chunk from kvm_alloc
	 * is not reclaimed — verify kvm_alloc has no free counterpart.
	 */
	if (distance_start > KVM_INST_B_MAX) {
		kvm_patching_worked = false;
		return;
	}

	/* Modify the chunk to fit the invocation */
	memcpy(p, kvm_emulate_wrtee, kvm_emulate_wrtee_len * 4);
	/* Splice the return-branch displacement into the template's b slot. */
	p[kvm_emulate_wrtee_branch_offs] |= distance_end & KVM_INST_B_MASK;

	if (imm_one) {
		/* wrteei 1: materialize MSR_EE via "li r30, MSR_EE". */
		p[kvm_emulate_wrtee_reg_offs] =
			KVM_INST_LI | __PPC_RT(R30) | MSR_EE;
	} else {
		/* Make clobbered registers work too */
		switch (get_rt(rt)) {
		case 30:
			/*
			 * rt is a register the template clobbers; reload its
			 * saved value from the magic page instead.
			 */
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch2), KVM_RT_30);
			break;
		case 31:
			kvm_patch_ins_ll(&p[kvm_emulate_wrtee_reg_offs],
					 magic_var(scratch1), KVM_RT_30);
			break;
		default:
			/* Any other rt can be OR'd straight into the template. */
			p[kvm_emulate_wrtee_reg_offs] |= rt;
			break;
		}
	}

	/* Keep the original instruction so the template can re-execute it. */
	p[kvm_emulate_wrtee_orig_ins_offs] = *inst;
	/* The copy is executable code: make it visible to instruction fetch. */
	flush_icache_range((ulong)p, (ulong)p + kvm_emulate_wrtee_len * 4);

	/* Patch the invocation */
	kvm_patch_ins_b(inst, distance_start);
}
/*
 * Inspect one instruction of the guest kernel and, if it is a known
 * privileged instruction that would trap, patch it in place to use the
 * shared magic page (magic_var) or an emulation template instead.
 *
 * @inst:     pointer to the instruction word to examine
 * @features: KVM_MAGIC_FEAT_* bits; MAS0..SPRG7 and SR patching is only
 *            performed when the host advertised the matching feature
 *
 * Three dispatch passes are made over the same word:
 *   1. with the rt field masked off (mfmsr/mfspr/mtspr/mtmsr/... forms),
 *   2. with rt and rb masked off (mtsrin),
 *   3. on the exact instruction encoding (wrteei 0/1).
 */
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;	/* encoding with rt cleared */
	u32 inst_rt = _inst & KVM_MASK_RT;	/* isolated rt field */

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		/* BookE calls it DEAR, classic calls it DAR; same magic slot. */
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		/* DSISR is 32 bit — use lwz, not a full-width load. */
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

#ifdef CONFIG_PPC_BOOK3E_MMU
	/* MAS registers: only patched when the host advertised the feature. */
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		/* MAS3 lives in the low half of the combined mas7_3 slot. */
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif

	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */

	case KVM_INST_MTSPR(SPRN_SPRG4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG5):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(sprg7), inst_rt);
		break;

#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(esr), inst_rt);
		break;
#endif

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEE:
		/* Register form: imm_one = 0, pass the rt field through. */
		kvm_patch_ins_wrtee(inst, inst_rt, 0);
		break;
#endif
	}

	/* Second pass: mask rb as well, for the mtsrin encoding. */
	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		break;
#endif
	}

	/* Third pass: exact encodings (wrteei has its EE bit in the imm). */
	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
		kvm_patch_ins_wrteei_0(inst);
		break;
	case KVM_INST_WRTEEI_1:
		/* wrteei 1 shares the wrtee template with imm_one = 1. */
		kvm_patch_ins_wrtee(inst, 0, 1);
		break;
#endif
	}
}
/*
 * NOTE(review): this block appears to be a TRUNCATED duplicate of the
 * kvm_check_ins() defined above — the text stops mid-expression in the
 * MTSPR(SPRN_MAS7) case ("if (feat"), and a further definition of the
 * same function follows. Preserved verbatim below; confirm against the
 * original file and remove or complete this duplicate. As written it
 * cannot compile (and would be a redefinition even if completed).
 */
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;
	u32 inst_rt = _inst & KVM_MASK_RT;

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG0):
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG1):
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG2):
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG3):
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR0):
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SRR1):
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_DEAR):
#else
	case KVM_INST_MFSPR(SPRN_DAR):
#endif
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_DSISR):
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MFSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_MAS7):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(mas7_3), inst_rt);
		break;
#endif /* CONFIG_PPC_BOOK3E_MMU */
	case KVM_INST_MFSPR(SPRN_SPRG4):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG4R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg4), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG5):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG5R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg5), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG6):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG6R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg6), inst_rt);
		break;
	case KVM_INST_MFSPR(SPRN_SPRG7):
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_SPRG7R):
#endif
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_ld(inst, magic_var(sprg7), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MFSPR(SPRN_ESR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(esr), inst_rt);
		break;
#endif
	case KVM_INST_MFSPR(SPRN_PIR):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_lwz(inst, magic_var(pir), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR(SPRN_SPRG0):
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG1):
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG2):
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SPRG3):
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR0):
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_SRR1):
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
#ifdef CONFIG_BOOKE
	case KVM_INST_MTSPR(SPRN_DEAR):
#else
	case KVM_INST_MTSPR(SPRN_DAR):
#endif
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_DSISR):
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;
#ifdef CONFIG_PPC_BOOK3E_MMU
	case KVM_INST_MTSPR(SPRN_MAS0):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas0), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS1):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas1), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS2):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_std(inst, magic_var(mas2), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS3):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas7_3) + 4, inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS4):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas4), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS6):
		if (features & KVM_MAGIC_FEAT_MAS0_TO_SPRG7)
			kvm_patch_ins_stw(inst, magic_var(mas6), inst_rt);
		break;
	case KVM_INST_MTSPR(SPRN_MAS7):
		if (feat
/*
 * Inspect one instruction of the guest kernel and, if it is a known
 * privileged instruction that would trap, patch it in place to read or
 * write the shared magic page (magic_var) or to use an emulation stub.
 *
 * @inst:     pointer to the instruction word to examine
 * @features: KVM_MAGIC_FEAT_* bits negotiated with the host; segment
 *            register (mtsrin) patching requires KVM_MAGIC_FEAT_SR
 *
 * The word is matched three times: with the rt field masked off
 * (mfmsr/mfspr/mtspr/mtmsr forms), with rt and rb masked off (mtsrin),
 * and as an exact encoding (wrteei 0/1).
 */
static void kvm_check_ins(u32 *inst, u32 features)
{
	u32 _inst = *inst;
	u32 inst_no_rt = _inst & ~KVM_MASK_RT;	/* encoding with rt cleared */
	u32 inst_rt = _inst & KVM_MASK_RT;	/* isolated rt field */

	switch (inst_no_rt) {
	/* Loads */
	case KVM_INST_MFMSR:
		kvm_patch_ins_ld(inst, magic_var(msr), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG0:
		kvm_patch_ins_ld(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG1:
		kvm_patch_ins_ld(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG2:
		kvm_patch_ins_ld(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MFSPR_SPRG3:
		kvm_patch_ins_ld(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR0:
		kvm_patch_ins_ld(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MFSPR_SRR1:
		kvm_patch_ins_ld(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MFSPR_DAR:
		kvm_patch_ins_ld(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MFSPR_DSISR:
		/* DSISR is 32 bit — use lwz, not a full-width load. */
		kvm_patch_ins_lwz(inst, magic_var(dsisr), inst_rt);
		break;

	/* Stores */
	case KVM_INST_MTSPR_SPRG0:
		kvm_patch_ins_std(inst, magic_var(sprg0), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG1:
		kvm_patch_ins_std(inst, magic_var(sprg1), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG2:
		kvm_patch_ins_std(inst, magic_var(sprg2), inst_rt);
		break;
	case KVM_INST_MTSPR_SPRG3:
		kvm_patch_ins_std(inst, magic_var(sprg3), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR0:
		kvm_patch_ins_std(inst, magic_var(srr0), inst_rt);
		break;
	case KVM_INST_MTSPR_SRR1:
		kvm_patch_ins_std(inst, magic_var(srr1), inst_rt);
		break;
	case KVM_INST_MTSPR_DAR:
		kvm_patch_ins_std(inst, magic_var(dar), inst_rt);
		break;
	case KVM_INST_MTSPR_DSISR:
		kvm_patch_ins_stw(inst, magic_var(dsisr), inst_rt);
		break;

	/* Nops */
	case KVM_INST_TLBSYNC:
		kvm_patch_ins_nop(inst);
		break;

	/* Rewrites */
	case KVM_INST_MTMSRD_L1:
		kvm_patch_ins_mtmsrd(inst, inst_rt);
		break;
	case KVM_INST_MTMSR:
	case KVM_INST_MTMSRD_L0:
		kvm_patch_ins_mtmsr(inst, inst_rt);
		break;
	}

	/* Second pass: mask rb as well, for the mtsrin encoding. */
	switch (inst_no_rt & ~KVM_MASK_RB) {
#ifdef CONFIG_PPC_BOOK3S_32
	case KVM_INST_MTSRIN:
		if (features & KVM_MAGIC_FEAT_SR) {
			u32 inst_rb = _inst & KVM_MASK_RB;
			kvm_patch_ins_mtsrin(inst, inst_rt, inst_rb);
		}
		/* Fixed: the original had a duplicated, unreachable "break;" here. */
		break;
#endif
	}

	/* Third pass: exact encodings. */
	switch (_inst) {
#ifdef CONFIG_BOOKE
	case KVM_INST_WRTEEI_0:
	case KVM_INST_WRTEEI_1:
		kvm_patch_ins_wrteei(inst);
		break;
#endif
	}
}