Example #1
static hva_t kvmppc_mmu_book3s_32_get_pteg(struct kvm_vcpu *vcpu,
				      u32 sre, gva_t eaddr,
				      bool primary)
{
	struct kvmppc_vcpu_book3s *vcpu_book3s = to_book3s(vcpu);
	u32 page, hash, pteg, htabmask;
	hva_t r;

	page = (eaddr & 0x0FFFFFFF) >> 12;
	htabmask = ((vcpu_book3s->sdr1 & 0x1FF) << 16) | 0xFFC0;

	hash = ((sr_vsid(sre) ^ page) << 6);
	if (!primary)
		hash = ~hash;
	hash &= htabmask;

	pteg = (vcpu_book3s->sdr1 & 0xffff0000) | hash;

	dprintk("MMU: pc=0x%lx eaddr=0x%lx sdr1=0x%llx pteg=0x%x vsid=0x%x\n",
		kvmppc_get_pc(vcpu), eaddr, vcpu_book3s->sdr1, pteg,
		sr_vsid(sre));

	r = gfn_to_hva(vcpu->kvm, pteg >> PAGE_SHIFT);
	if (kvm_is_error_hva(r))
		return r;
	return r | (pteg & ~PAGE_MASK);
}
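The function above implements the classic 32-bit hashed page table (HTAB) lookup: SDR1 supplies the table origin and mask, the PTEG hash mixes the segment VSID with the page index, and the secondary hash is the complement of the primary. The standalone sketch below replays that arithmetic with hypothetical register values; it is an illustration, not kernel code.

#include <stdio.h>
#include <stdint.h>

/* Replays the PTEG address math from the example, outside the kernel. */
static uint32_t pteg_addr(uint32_t sdr1, uint32_t vsid, uint32_t eaddr,
			  int primary)
{
	uint32_t page = (eaddr & 0x0FFFFFFF) >> 12;          /* page index in segment */
	uint32_t htabmask = ((sdr1 & 0x1FF) << 16) | 0xFFC0; /* HTABMASK from SDR1 */
	uint32_t hash = (vsid ^ page) << 6;                  /* PTEG-aligned primary hash */

	if (!primary)
		hash = ~hash;                                /* secondary hash */
	hash &= htabmask;

	return (sdr1 & 0xffff0000) | hash;                   /* HTABORG | hash */
}

int main(void)
{
	/* Hypothetical values, purely for illustration. */
	uint32_t sdr1 = 0x00300003, vsid = 0x12345, eaddr = 0x0ABCD123;

	printf("primary PTEG:   0x%08x\n", pteg_addr(sdr1, vsid, eaddr, 1));
	printf("secondary PTEG: 0x%08x\n", pteg_addr(sdr1, vsid, eaddr, 0));
	return 0;
}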
Example #2
static void kvmppc_fixup_split_real(struct kvm_vcpu *vcpu)
{
	ulong msr = kvmppc_get_msr(vcpu);
	ulong pc = kvmppc_get_pc(vcpu);

	/* Only act in DR-only split real mode (MSR_IR off, MSR_DR on) */
	if ((msr & (MSR_IR|MSR_DR)) != MSR_DR)
		return;

	/* Nothing to do if we have already fixed up the guest */
	if (vcpu->arch.hflags & BOOK3S_HFLAG_SPLIT_HACK)
		return;

	/* Only fix up code in the fixupable address space */
	if (pc & SPLIT_HACK_MASK)
		return;

	vcpu->arch.hflags |= BOOK3S_HFLAG_SPLIT_HACK;
	kvmppc_set_pc(vcpu, pc | SPLIT_HACK_OFFS);
}
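Split real mode here means the guest runs with instruction relocation off but data relocation on. The sketch below isolates that MSR test; the MSR_IR/MSR_DR values are spelled out for illustration and in the kernel come from asm/reg.h.

#include <stdio.h>

#define MSR_IR	0x20UL	/* instruction relocation (illustrative value) */
#define MSR_DR	0x10UL	/* data relocation (illustrative value) */

/* True only when DR is on and IR is off, as in the guard above. */
static int in_dr_only_split_real(unsigned long msr)
{
	return (msr & (MSR_IR | MSR_DR)) == MSR_DR;
}

int main(void)
{
	printf("%d\n", in_dr_only_split_real(MSR_DR));          /* 1: DR-only */
	printf("%d\n", in_dr_only_split_real(MSR_IR | MSR_DR)); /* 0: full translation */
	return 0;
}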
Example #3
File: emulate.c  Project: Naibasak/linux
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    u32 inst = kvmppc_get_last_inst(vcpu);
    int ra = get_ra(inst);
    int rs = get_rs(inst);
    int rt = get_rt(inst);
    int sprn = get_sprn(inst);
    enum emulation_result emulated = EMULATE_DONE;
    int advance = 1;

    /* this default type might be overwritten by subcategories */
    kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

    pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

    switch (get_op(inst)) {
    case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
    case OP_TRAP_64:
        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
        kvmppc_core_queue_program(vcpu,
                                  vcpu->arch.shared->esr | ESR_PTR);
#endif
        advance = 0;
        break;

    case 31:
        switch (get_xop(inst)) {

        case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
        case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
            kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
            kvmppc_core_queue_program(vcpu,
                                      vcpu->arch.shared->esr | ESR_PTR);
#endif
            advance = 0;
            break;
        case OP_31_XOP_LWZX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
            break;

        case OP_31_XOP_LBZX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
            break;

        case OP_31_XOP_LBZUX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_STWX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           4, 1);
            break;

        case OP_31_XOP_STBX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           1, 1);
            break;

        case OP_31_XOP_STBUX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           1, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_LHAX:
            emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
            break;

        case OP_31_XOP_LHZX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
            break;

        case OP_31_XOP_LHZUX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_MFSPR:
            emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
            break;

        case OP_31_XOP_STHX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           2, 1);
            break;

        case OP_31_XOP_STHUX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           2, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_MTSPR:
            emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
            break;

        case OP_31_XOP_DCBST:
        case OP_31_XOP_DCBF:
        case OP_31_XOP_DCBI:
            /* Do nothing. The guest is performing dcbi because
             * hardware DMA is not snooped by the dcache, but
             * emulated DMA either goes through the dcache as
             * normal writes, or the host kernel has handled dcache
             * coherence. */
            break;

        case OP_31_XOP_LWBRX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
            break;

        case OP_31_XOP_TLBSYNC:
            break;

        case OP_31_XOP_STWBRX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           4, 0);
            break;

        case OP_31_XOP_LHBRX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
            break;

        case OP_31_XOP_STHBRX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           2, 0);
            break;

        default:
            /* Attempt core-specific emulation below. */
            emulated = EMULATE_FAIL;
        }
        break;

    case OP_LWZ:
        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
        break;

    /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
    case OP_LD:
        rt = get_rt(inst);
        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
        break;

    case OP_LWZU:
        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_LBZ:
        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
        break;

    case OP_LBZU:
        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_STW:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       4, 1);
        break;

    /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
    case OP_STD:
        rs = get_rs(inst);
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       8, 1);
        break;

    case OP_STWU:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       4, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_STB:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       1, 1);
        break;

    case OP_STBU:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       1, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_LHZ:
        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
        break;

    case OP_LHZU:
        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_LHA:
        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
        break;

    case OP_LHAU:
        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_STH:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       2, 1);
        break;

    case OP_STHU:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       2, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    default:
        emulated = EMULATE_FAIL;
    }

    if (emulated == EMULATE_FAIL) {
        emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
        if (emulated == EMULATE_AGAIN) {
            advance = 0;
        } else if (emulated == EMULATE_FAIL) {
            advance = 0;
            printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                   "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
            kvmppc_core_queue_program(vcpu, 0);
        }
    }

    trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

    /* Advance past emulated instruction. */
    if (advance)
        kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

    return emulated;
}
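Every variant of this emulator leans on small field accessors (get_op, get_xop, get_rt, get_ra, get_rb, get_sprn; rs shares the rt bit field) to slice the 32-bit instruction word. A plausible standalone version following the standard PowerPC encoding is sketched below; the kernel's own definitions live in asm/disassemble.h.

#include <stdio.h>
#include <stdint.h>

static int get_op(uint32_t inst)  { return inst >> 26; }          /* primary opcode */
static int get_xop(uint32_t inst) { return (inst >> 1) & 0x3ff; } /* extended opcode */
static int get_rt(uint32_t inst)  { return (inst >> 21) & 0x1f; } /* target reg (rs shares this field) */
static int get_ra(uint32_t inst)  { return (inst >> 16) & 0x1f; } /* base reg */
static int get_rb(uint32_t inst)  { return (inst >> 11) & 0x1f; } /* index reg */

/* The 10-bit SPR number is encoded with its two 5-bit halves swapped. */
static int get_sprn(uint32_t inst)
{
	return ((inst >> 16) & 0x1f) | ((inst >> 6) & 0x3e0);
}

int main(void)
{
	uint32_t inst = 0x7c6802a6; /* mflr r3, i.e. mfspr rt=3, spr=8 */

	printf("op=%d xop=%d rt=%d ra=%d rb=%d sprn=%d\n",
	       get_op(inst), get_xop(inst), get_rt(inst),
	       get_ra(inst), get_rb(inst), get_sprn(inst));
	return 0;
}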
Example #4
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
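Compared with the previous example, this version funnels every mfspr result through a single spr_val and commits it to the target GPR once, after the switch. The sketch below distills that pattern; the names and the dummy SPR values are stand-ins for illustration (26 and 27 are the architected SPR numbers for SRR0/SRR1).

#include <stdio.h>

enum { EMU_DONE, EMU_FAIL };

/* Resolve an SPR read into a single value; unknown SPRs defer to
 * core-specific code, mirroring the default: branch above. */
static int read_spr(int sprn, unsigned long *spr_val)
{
	switch (sprn) {
	case 26: *spr_val = 0x1234; return EMU_DONE; /* SRR0 (dummy value) */
	case 27: *spr_val = 0x5678; return EMU_DONE; /* SRR1 (dummy value) */
	default: return EMU_FAIL;
	}
}

int main(void)
{
	unsigned long val = 0;

	if (read_spr(26, &val) == EMU_DONE)
		printf("gpr <- 0x%lx\n", val); /* single commit point */
	return 0;
}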
Example #5
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest does a bad real-mode access,
	 * as we have enabled VRMA (virtualized real mode area) mode in the
	 * LPCR.  We just generate an appropriate DSI/ISI to the guest.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
					0x08000000);
		r = RESUME_GUEST;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(tsk)) {
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	return r;
}
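For the hcall path above, the exit is completed in userspace: the VMM reads nr and args out of the shared kvm_run page, handles the hypercall, and writes papr_hcall.ret before re-entering the guest. A hedged sketch of that consumer, with the dispatch elided and a hypothetical hcall number:

#include <linux/kvm.h>
#include <stdio.h>

static void handle_papr_hcall(struct kvm_run *run)
{
	int i;

	if (run->exit_reason != KVM_EXIT_PAPR_HCALL)
		return;

	printf("hcall %llu\n", (unsigned long long)run->papr_hcall.nr);
	for (i = 0; i < 9; i++)
		printf("  arg[%d] = 0x%llx\n", i,
		       (unsigned long long)run->papr_hcall.args[i]);

	run->papr_hcall.ret = 0; /* H_SUCCESS, assuming we handled it */
}

int main(void)
{
	/* Normally this page is mmap()ed from the vcpu fd; a static
	 * instance stands in for it here. */
	static struct kvm_run run = { .exit_reason = KVM_EXIT_PAPR_HCALL };

	run.papr_hcall.nr = 0x3a8; /* hypothetical hcall number */
	handle_papr_hcall(&run);
	return 0;
}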
Example #6
/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
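The lwbrx/stwbrx/lhbrx/sthbrx cases pass 0 as the final (is_default_endian) argument, asking the MMIO path to byte-reverse the data. The sketch below shows the 32-bit swap that request amounts to; illustrative only.

#include <stdio.h>
#include <stdint.h>

/* Plain-C equivalent of the byte reversal lwbrx/stwbrx rely on. */
static uint32_t bswap32(uint32_t v)
{
	return (v >> 24) | ((v >> 8) & 0x0000ff00) |
	       ((v << 8) & 0x00ff0000) | (v << 24);
}

int main(void)
{
	printf("0x%08x\n", bswap32(0x11223344)); /* prints 0x44332211 */
	return 0;
}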
Example #7
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);	/* stbux updates ra, not rs */
			break;

		case OP_31_XOP_LHAX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb()); break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
				pr_debug(KERN_INFO "mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 kvmppc_get_gpr(vcpu, rt));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHA:
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
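Unlike the later versions, this variant computes the update-form effective address inline: EA = (rA ? GPR[rA] : 0) + GPR[rB], written back into rA after the access (the stbux case above has been corrected accordingly; it originally updated rs). A standalone sketch of that rule:

#include <stdio.h>
#include <stdint.h>

/* Indexed EA as computed inline above; a zero rA field means
 * "no base register" rather than GPR0. */
static uint32_t ea_indexed(const uint32_t gpr[32], int ra, int rb)
{
	uint32_t ea = gpr[rb];

	if (ra)
		ea += gpr[ra];
	return ea;
}

int main(void)
{
	uint32_t gpr[32] = { 0 };

	gpr[3] = 0x1000;
	gpr[4] = 0x24;
	printf("ea = 0x%x\n", ea_indexed(gpr, 3, 4)); /* 0x1024 */
	gpr[3] = ea_indexed(gpr, 3, 4);               /* the "update" part */
	return 0;
}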
Example #8
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst;
	int rs, rt, sprn;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, INST_GENERIC, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	rs = get_rs(inst);
	rt = get_rt(inst);
	sprn = get_sprn(inst);

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;

		case OP_31_XOP_MFSPR:
			emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_MTSPR:
			emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
			if (emulated == EMULATE_AGAIN) {
				emulated = EMULATE_DONE;
				advance = 0;
			}
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case 0:
		/*
		 * Instruction with primary opcode 0. The Power ISA
		 * defines these as illegal instructions.
		 */
		if (inst == KVMPPC_INST_SW_BREAKPOINT) {
			run->exit_reason = KVM_EXIT_DEBUG;
			run->debug.arch.address = kvmppc_get_pc(vcpu);
			emulated = EMULATE_EXIT_USER;
			advance = 0;
		} else
			emulated = EMULATE_FAIL;

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = vcpu->kvm->arch.kvm_ops->emulate_op(run, vcpu, inst,
							       &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
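The primary-opcode-0 branch works because the Power ISA defines opcode-0 words as illegal, so KVM can reserve one of them (KVMPPC_INST_SW_BREAKPOINT) as a software breakpoint and bounce it to userspace as a debug exit. A minimal sketch of that decode, using a hypothetical breakpoint word:

#include <stdio.h>
#include <stdint.h>

#define SW_BREAKPOINT_INST 0x00dd0000u /* hypothetical opcode-0 word */

/* Opcode-0 instructions are illegal per the ISA, so a single reserved
 * word can safely stand in as a software breakpoint. */
static int is_sw_breakpoint(uint32_t inst)
{
	return (inst >> 26) == 0 && inst == SW_BREAKPOINT_INST;
}

int main(void)
{
	printf("%d\n", is_sw_breakpoint(SW_BREAKPOINT_INST)); /* 1 */
	printf("%d\n", is_sw_breakpoint(0x60000000));         /* 0: nop */
	return 0;
}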