Example no. 1
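/* Emulate 44x-specific instructions: DCR accesses and tlbwe/tlbsx. Anything
 * not handled here falls through to the generic BookE emulator. */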
int kvmppc_core_emulate_op_44x(struct kvm_run *run, struct kvm_vcpu *vcpu,
			       unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int dcrn = get_dcrn(inst);
	int ra = get_ra(inst);
	int rb = get_rb(inst);
	int rc = get_rc(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int ws = get_ws(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_MFDCR:
			emulated = emulate_mfdcr(vcpu, rt, dcrn);
			break;

		case XOP_MFDCRX:
			emulated = emulate_mfdcr(vcpu, rt,
					kvmppc_get_gpr(vcpu, ra));
			break;

		case XOP_MTDCR:
			emulated = emulate_mtdcr(vcpu, rs, dcrn);
			break;

		case XOP_MTDCRX:
			emulated = emulate_mtdcr(vcpu, rs,
					kvmppc_get_gpr(vcpu, ra));
			break;

		case XOP_TLBWE:
			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case XOP_TLBSX:
			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;

		case XOP_ICCCI:
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
Example no. 2
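/* Dump the guest-visible register state (GPRs, SPRs, SLB) to the kernel log. */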
void kvmppc_dump_regs(struct kvm_vcpu *vcpu)
{
	int r;

	pr_err("vcpu %p (%d):\n", vcpu, vcpu->vcpu_id);
	pr_err("pc  = %.16lx  msr = %.16llx  trap = %x\n",
	       vcpu->arch.pc, vcpu->arch.shregs.msr, vcpu->arch.trap);
	for (r = 0; r < 16; ++r)
		pr_err("r%2d = %.16lx  r%d = %.16lx\n",
		       r, kvmppc_get_gpr(vcpu, r),
		       r+16, kvmppc_get_gpr(vcpu, r+16));
	pr_err("ctr = %.16lx  lr  = %.16lx\n",
	       vcpu->arch.ctr, vcpu->arch.lr);
	pr_err("srr0 = %.16llx srr1 = %.16llx\n",
	       vcpu->arch.shregs.srr0, vcpu->arch.shregs.srr1);
	pr_err("sprg0 = %.16llx sprg1 = %.16llx\n",
	       vcpu->arch.shregs.sprg0, vcpu->arch.shregs.sprg1);
	pr_err("sprg2 = %.16llx sprg3 = %.16llx\n",
	       vcpu->arch.shregs.sprg2, vcpu->arch.shregs.sprg3);
	pr_err("cr = %.8x  xer = %.16lx  dsisr = %.8x\n",
	       vcpu->arch.cr, vcpu->arch.xer, vcpu->arch.shregs.dsisr);
	pr_err("dar = %.16llx\n", vcpu->arch.shregs.dar);
	pr_err("fault dar = %.16lx dsisr = %.8x\n",
	       vcpu->arch.fault_dar, vcpu->arch.fault_dsisr);
	pr_err("SLB (%d entries):\n", vcpu->arch.slb_max);
	for (r = 0; r < vcpu->arch.slb_max; ++r)
		pr_err("  ESID = %.16llx VSID = %.16llx\n",
		       vcpu->arch.slb[r].orige, vcpu->arch.slb[r].origv);
	pr_err("lpcr = %.16lx sdr1 = %.16lx last_inst = %.8x\n",
	       vcpu->arch.lpcr, vcpu->kvm->arch.sdr1,
	       vcpu->arch.last_inst);
}
Example no. 3
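/* H_REMOVE for PR KVM: invalidate a single HPT entry, flush the stale TLB
 * entry, and return the old HPTE words in r4/r5. */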
static int kvmppc_h_pr_remove(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long v = 0, pteg, rb;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn) ||
	    ((flags & H_ANDCOND) && (pte[0] & avpn) != 0))
		goto done;

	copy_to_user((void __user *)pteg, &v, sizeof(v));

	rb = compute_tlbie_rb(pte[0], pte[1], pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);

	ret = H_SUCCESS;
	kvmppc_set_gpr(vcpu, 4, pte[0]);
	kvmppc_set_gpr(vcpu, 5, pte[1]);

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
Example no. 4
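/* H_BULK_REMOVE for PR KVM: process up to H_BULK_REMOVE_MAX_BATCH two-word
 * requests starting at r4, writing a response code back into the first word
 * of each request. */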
static int kvmppc_h_pr_bulk_remove(struct kvm_vcpu *vcpu)
{
	int i;
	int paramnr = 4;
	int ret = H_SUCCESS;

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	for (i = 0; i < H_BULK_REMOVE_MAX_BATCH; i++) {
		unsigned long tsh = kvmppc_get_gpr(vcpu, paramnr+(2*i));
		unsigned long tsl = kvmppc_get_gpr(vcpu, paramnr+(2*i)+1);
		unsigned long pteg, rb, flags;
		unsigned long pte[2];
		unsigned long v = 0;

		if ((tsh & H_BULK_REMOVE_TYPE) == H_BULK_REMOVE_END) {
			break; /* Exit success */
		} else if ((tsh & H_BULK_REMOVE_TYPE) !=
			   H_BULK_REMOVE_REQUEST) {
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		tsh &= H_BULK_REMOVE_PTEX | H_BULK_REMOVE_FLAGS;
		tsh |= H_BULK_REMOVE_RESPONSE;

		if ((tsh & H_BULK_REMOVE_ANDCOND) &&
		    (tsh & H_BULK_REMOVE_AVPN)) {
			tsh |= H_BULK_REMOVE_PARM;
			kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
			ret = H_PARAMETER;
			break; /* Exit fail */
		}

		pteg = get_pteg_addr(vcpu, tsh & H_BULK_REMOVE_PTEX);
		copy_from_user(pte, (void __user *)pteg, sizeof(pte));

		/* tsl = AVPN */
		flags = (tsh & H_BULK_REMOVE_FLAGS) >> 26;

		if ((pte[0] & HPTE_V_VALID) == 0 ||
		    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != tsl) ||
		    ((flags & H_ANDCOND) && (pte[0] & tsl) != 0)) {
			tsh |= H_BULK_REMOVE_NOT_FOUND;
		} else {
			/* Splat the pteg in (userland) hpt */
			copy_to_user((void __user *)pteg, &v, sizeof(v));

			rb = compute_tlbie_rb(pte[0], pte[1],
					      tsh & H_BULK_REMOVE_PTEX);
			vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
			tsh |= H_BULK_REMOVE_SUCCESS;
			tsh |= (pte[1] & (HPTE_R_C | HPTE_R_R)) << 43;
		}
		kvmppc_set_gpr(vcpu, paramnr+(2*i), tsh);
	}
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
Example no. 5
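/* KVM paravirtual hypercall dispatch: the hcall number arrives in r11,
 * arguments in r3-r6, and the second return value goes back in r4. */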
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
Example no. 6
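/* H_PUT_TCE for PR KVM: defer to the common TCE handler; H_TOO_HARD means
 * the call could not be emulated in kernel. */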
static int kvmppc_h_pr_put_tce(struct kvm_vcpu *vcpu)
{
	unsigned long liobn = kvmppc_get_gpr(vcpu, 4);
	unsigned long ioba = kvmppc_get_gpr(vcpu, 5);
	unsigned long tce = kvmppc_get_gpr(vcpu, 6);
	long rc;

	rc = kvmppc_h_put_tce(vcpu, liobn, ioba, tce);
	if (rc == H_TOO_HARD)
		return EMULATE_FAIL;
	kvmppc_set_gpr(vcpu, 3, rc);
	return EMULATE_DONE;
}
Example no. 7
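/* KVM_GET_REGS: copy the guest-visible register state out to userspace. */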
int kvm_arch_vcpu_ioctl_get_regs(struct kvm_vcpu *vcpu, struct kvm_regs *regs)
{
	int i;

	vcpu_load(vcpu);

	regs->pc = vcpu->arch.pc;
	regs->cr = kvmppc_get_cr(vcpu);
	regs->ctr = vcpu->arch.ctr;
	regs->lr = vcpu->arch.lr;
	regs->xer = kvmppc_get_xer(vcpu);
	regs->msr = vcpu->arch.msr;
	regs->srr0 = vcpu->arch.srr0;
	regs->srr1 = vcpu->arch.srr1;
	regs->pid = vcpu->arch.pid;
	regs->sprg0 = vcpu->arch.sprg0;
	regs->sprg1 = vcpu->arch.sprg1;
	regs->sprg2 = vcpu->arch.sprg2;
	regs->sprg3 = vcpu->arch.sprg3;
	regs->sprg4 = vcpu->arch.sprg4;
	regs->sprg5 = vcpu->arch.sprg5;
	regs->sprg6 = vcpu->arch.sprg6;
	regs->sprg7 = vcpu->arch.sprg7;

	for (i = 0; i < ARRAY_SIZE(regs->gpr); i++)
		regs->gpr[i] = kvmppc_get_gpr(vcpu, i);

	vcpu_put(vcpu);

	return 0;
}
Example no. 8
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case HC_VENDOR_KVM | KVM_HC_PPC_MAP_MAGIC_PAGE:
	{
		vcpu->arch.magic_page_pa = param1;
		vcpu->arch.magic_page_ea = param2;

		r2 = KVM_MAGIC_FEAT_SR;

		r = HC_EV_SUCCESS;
		break;
	}
	case HC_VENDOR_KVM | KVM_HC_FEATURES:
		r = HC_EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500)
		/* XXX Missing magic page on 44x */
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	default:
		r = HC_EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
Example no. 9
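/* mtdcr emulation: CPR0 config-address writes are handled in kernel; any
 * other DCR write is forwarded to userspace via a DCR exit. */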
static int emulate_mtdcr(struct kvm_vcpu *vcpu, int rs, int dcrn)
{
	/* emulate some access in kernel */
	switch (dcrn) {
	case DCRN_CPR0_CONFIG_ADDR:
		vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
		return EMULATE_DONE;
	default:
		vcpu->run->dcr.dcrn = dcrn;
		vcpu->run->dcr.data = kvmppc_get_gpr(vcpu, rs);
		vcpu->run->dcr.is_write = 1;
		vcpu->arch.dcr_is_write = 1;
		vcpu->arch.dcr_needed = 1;
		kvmppc_account_exit(vcpu, DCR_EXITS);
		return EMULATE_DO_DCR;
	}
}
Example no. 10
/* TODO: use vcpu_printf() */
void kvmppc_dump_vcpu(struct kvm_vcpu *vcpu)
{
	int i;

	printk("pc:   %08lx msr:  %08lx\n", vcpu->arch.pc, vcpu->arch.msr);
	printk("lr:   %08lx ctr:  %08lx\n", vcpu->arch.lr, vcpu->arch.ctr);
	printk("srr0: %08lx srr1: %08lx\n", vcpu->arch.srr0, vcpu->arch.srr1);

	printk("exceptions: %08lx\n", vcpu->arch.pending_exceptions);

	for (i = 0; i < 32; i += 4) {
		printk("gpr%02d: %08lx %08lx %08lx %08lx\n", i,
		       kvmppc_get_gpr(vcpu, i),
		       kvmppc_get_gpr(vcpu, i+1),
		       kvmppc_get_gpr(vcpu, i+2),
		       kvmppc_get_gpr(vcpu, i+3));
	}
}
Example no. 11
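/* H_ENTER for PR KVM: find a free slot in the target PTEG (or use the exact
 * slot when H_EXACT is set), install the new HPTE, and return the slot
 * index in r4. */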
static int kvmppc_h_pr_enter(struct kvm_vcpu *vcpu)
{
	long flags = kvmppc_get_gpr(vcpu, 4);
	long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long pteg[2 * 8];
	unsigned long pteg_addr, i, *hpte;
	long int ret;

	i = pte_index & 7;
	pte_index &= ~7UL;
	pteg_addr = get_pteg_addr(vcpu, pte_index);

	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pteg, (void __user *)pteg_addr, sizeof(pteg));
	hpte = pteg;

	ret = H_PTEG_FULL;
	if (likely((flags & H_EXACT) == 0)) {
		for (i = 0; ; ++i) {
			if (i == 8)
				goto done;
			if ((*hpte & HPTE_V_VALID) == 0)
				break;
			hpte += 2;
		}
	} else {
		hpte += i * 2;
		if (*hpte & HPTE_V_VALID)
			goto done;
	}

	hpte[0] = kvmppc_get_gpr(vcpu, 6);
	hpte[1] = kvmppc_get_gpr(vcpu, 7);
	pteg_addr += i * HPTE_SIZE;
	copy_to_user((void __user *)pteg_addr, hpte, HPTE_SIZE);
	kvmppc_set_gpr(vcpu, 4, pte_index | i);
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
Example no. 12
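/* Emulate mtspr for SPRs shared across cores; unknown SPRs are deferred to
 * the core-specific handler. */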
static int kvmppc_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
    enum emulation_result emulated = EMULATE_DONE;
    ulong spr_val = kvmppc_get_gpr(vcpu, rs);

    switch (sprn) {
    case SPRN_SRR0:
        vcpu->arch.shared->srr0 = spr_val;
        break;
    case SPRN_SRR1:
        vcpu->arch.shared->srr1 = spr_val;
        break;

    /* XXX We need to context-switch the timebase for
     * watchdog and FIT. */
    case SPRN_TBWL:
        break;
    case SPRN_TBWU:
        break;

    case SPRN_DEC:
        vcpu->arch.dec = spr_val;
        kvmppc_emulate_dec(vcpu);
        break;

    case SPRN_SPRG0:
        vcpu->arch.shared->sprg0 = spr_val;
        break;
    case SPRN_SPRG1:
        vcpu->arch.shared->sprg1 = spr_val;
        break;
    case SPRN_SPRG2:
        vcpu->arch.shared->sprg2 = spr_val;
        break;
    case SPRN_SPRG3:
        vcpu->arch.shared->sprg3 = spr_val;
        break;

    /* PIR can legally be written, but we ignore it */
    case SPRN_PIR:
        break;

    default:
        emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
                                             spr_val);
        if (emulated == EMULATE_FAIL)
            printk(KERN_INFO "mtspr: unknown spr "
                   "0x%x\n", sprn);
        break;
    }

    kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);

    return emulated;
}
Example no. 13
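/* 44x-specific mtspr emulation; unhandled SPRs go to the generic BookE
 * handler. */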
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;

	switch (sprn) {
	case SPRN_PID:
		kvmppc_set_pid(vcpu, kvmppc_get_gpr(vcpu, rs)); break;
	case SPRN_MMUCR:
		vcpu->arch.mmucr = kvmppc_get_gpr(vcpu, rs); break;
	case SPRN_CCR0:
		vcpu->arch.ccr0 = kvmppc_get_gpr(vcpu, rs); break;
	case SPRN_CCR1:
		vcpu->arch.ccr1 = kvmppc_get_gpr(vcpu, rs); break;
	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
	}

	kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
	return emulated;
}
Example no. 14
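/* H_PROTECT for PR KVM: rewrite the pp/key/N protection bits of an existing
 * HPTE and invalidate the stale TLB entry. */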
static int kvmppc_h_pr_protect(struct kvm_vcpu *vcpu)
{
	unsigned long flags = kvmppc_get_gpr(vcpu, 4);
	unsigned long pte_index = kvmppc_get_gpr(vcpu, 5);
	unsigned long avpn = kvmppc_get_gpr(vcpu, 6);
	unsigned long rb, pteg, r, v;
	unsigned long pte[2];
	long int ret;

	pteg = get_pteg_addr(vcpu, pte_index);
	mutex_lock(&vcpu->kvm->arch.hpt_mutex);
	copy_from_user(pte, (void __user *)pteg, sizeof(pte));

	ret = H_NOT_FOUND;
	if ((pte[0] & HPTE_V_VALID) == 0 ||
	    ((flags & H_AVPN) && (pte[0] & ~0x7fUL) != avpn))
		goto done;

	v = pte[0];
	r = pte[1];
	r &= ~(HPTE_R_PP0 | HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_HI |
	       HPTE_R_KEY_LO);
	r |= (flags << 55) & HPTE_R_PP0;
	r |= (flags << 48) & HPTE_R_KEY_HI;
	r |= flags & (HPTE_R_PP | HPTE_R_N | HPTE_R_KEY_LO);

	pte[1] = r;

	rb = compute_tlbie_rb(v, r, pte_index);
	vcpu->arch.mmu.tlbie(vcpu, rb, rb & 1 ? true : false);
	copy_to_user((void __user *)pteg, pte, sizeof(pte));
	ret = H_SUCCESS;

 done:
	mutex_unlock(&vcpu->kvm->arch.hpt_mutex);
	kvmppc_set_gpr(vcpu, 3, ret);

	return EMULATE_DONE;
}
Example no. 15
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_DEAR:
		vcpu->arch.shared->dar = spr_val; break;
	case SPRN_ESR:
		vcpu->arch.shared->esr = spr_val; break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
Example no. 16
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong uninitialized_var(msr_mask);
	bool update_esr = false, update_dear = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
	case BOOKE_IRQPRIO_ALIGNMENT:
		allowed = 1;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_WATCHDOG:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		msr_mask = MSR_ME;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		msr_mask = 0;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE|MSR_ME|MSR_DE;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		msr_mask = MSR_ME;
		break;
	}

	if (allowed) {
		vcpu->arch.shared->srr0 = vcpu->arch.pc;
		vcpu->arch.shared->srr1 = vcpu->arch.shared->msr;
		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			vcpu->arch.shared->esr = vcpu->arch.queued_esr;
		if (update_dear == true)
			vcpu->arch.shared->dar = vcpu->arch.queued_dear;
		kvmppc_set_msr(vcpu, vcpu->arch.shared->msr & msr_mask);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

	return allowed;
}
Example no. 17
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
    u32 inst = kvmppc_get_last_inst(vcpu);
    int ra = get_ra(inst);
    int rs = get_rs(inst);
    int rt = get_rt(inst);
    int sprn = get_sprn(inst);
    enum emulation_result emulated = EMULATE_DONE;
    int advance = 1;

    /* this default type might be overwritten by subcategories */
    kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

    pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

    switch (get_op(inst)) {
    case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
    case OP_TRAP_64:
        kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
        kvmppc_core_queue_program(vcpu,
                                  vcpu->arch.shared->esr | ESR_PTR);
#endif
        advance = 0;
        break;

    case 31:
        switch (get_xop(inst)) {

        case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
        case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
            kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
            kvmppc_core_queue_program(vcpu,
                                      vcpu->arch.shared->esr | ESR_PTR);
#endif
            advance = 0;
            break;
        case OP_31_XOP_LWZX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
            break;

        case OP_31_XOP_LBZX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
            break;

        case OP_31_XOP_LBZUX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_STWX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           4, 1);
            break;

        case OP_31_XOP_STBX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           1, 1);
            break;

        case OP_31_XOP_STBUX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           1, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_LHAX:
            emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
            break;

        case OP_31_XOP_LHZX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
            break;

        case OP_31_XOP_LHZUX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_MFSPR:
            emulated = kvmppc_emulate_mfspr(vcpu, sprn, rt);
            break;

        case OP_31_XOP_STHX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           2, 1);
            break;

        case OP_31_XOP_STHUX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           2, 1);
            kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
            break;

        case OP_31_XOP_MTSPR:
            emulated = kvmppc_emulate_mtspr(vcpu, sprn, rs);
            break;

        case OP_31_XOP_DCBST:
        case OP_31_XOP_DCBF:
        case OP_31_XOP_DCBI:
            /* Do nothing. The guest is performing dcbi because
             * hardware DMA is not snooped by the dcache, but
             * emulated DMA either goes through the dcache as
             * normal writes, or the host kernel has handled dcache
             * coherence. */
            break;

        case OP_31_XOP_LWBRX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
            break;

        case OP_31_XOP_TLBSYNC:
            break;

        case OP_31_XOP_STWBRX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           4, 0);
            break;

        case OP_31_XOP_LHBRX:
            emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
            break;

        case OP_31_XOP_STHBRX:
            emulated = kvmppc_handle_store(run, vcpu,
                                           kvmppc_get_gpr(vcpu, rs),
                                           2, 0);
            break;

        default:
            /* Attempt core-specific emulation below. */
            emulated = EMULATE_FAIL;
        }
        break;

    case OP_LWZ:
        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
        break;

    /* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
    case OP_LD:
        rt = get_rt(inst);
        emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
        break;

    case OP_LWZU:
        emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_LBZ:
        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
        break;

    case OP_LBZU:
        emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_STW:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       4, 1);
        break;

    /* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
    case OP_STD:
        rs = get_rs(inst);
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       8, 1);
        break;

    case OP_STWU:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       4, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_STB:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       1, 1);
        break;

    case OP_STBU:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       1, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_LHZ:
        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
        break;

    case OP_LHZU:
        emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_LHA:
        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
        break;

    case OP_LHAU:
        emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    case OP_STH:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       2, 1);
        break;

    case OP_STHU:
        emulated = kvmppc_handle_store(run, vcpu,
                                       kvmppc_get_gpr(vcpu, rs),
                                       2, 1);
        kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
        break;

    default:
        emulated = EMULATE_FAIL;
    }

    if (emulated == EMULATE_FAIL) {
        emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
        if (emulated == EMULATE_AGAIN) {
            advance = 0;
        } else if (emulated == EMULATE_FAIL) {
            advance = 0;
            printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
                   "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
            kvmppc_core_queue_program(vcpu, 0);
        }
    }

    trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

    /* Advance past emulated instruction. */
    if (advance)
        kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

    return emulated;
}
Example no. 18
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	int ra = get_ra(inst);
	int rs = get_rs(inst);
	int rt = get_rt(inst);
	int sprn = get_sprn(inst);
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;
	ulong spr_val = 0;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu,
					  vcpu->arch.shared->esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_TRAP:
#ifdef CONFIG_64BIT
		case OP_31_XOP_TRAP_64:
#endif
#ifdef CONFIG_PPC_BOOK3S
			kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
			kvmppc_core_queue_program(vcpu,
					vcpu->arch.shared->esr | ESR_PTR);
#endif
			advance = 0;
			break;
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MFSPR:
			switch (sprn) {
			case SPRN_SRR0:
				spr_val = vcpu->arch.shared->srr0;
				break;
			case SPRN_SRR1:
				spr_val = vcpu->arch.shared->srr1;
				break;
			case SPRN_PVR:
				spr_val = vcpu->arch.pvr;
				break;
			case SPRN_PIR:
				spr_val = vcpu->vcpu_id;
				break;
			case SPRN_MSSSR0:
				spr_val = 0;
				break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				spr_val = get_tb() >> 32;
				break;
			case SPRN_TBWU:
				spr_val = get_tb();
				break;

			case SPRN_SPRG0:
				spr_val = vcpu->arch.shared->sprg0;
				break;
			case SPRN_SPRG1:
				spr_val = vcpu->arch.shared->sprg1;
				break;
			case SPRN_SPRG2:
				spr_val = vcpu->arch.shared->sprg2;
				break;
			case SPRN_SPRG3:
				spr_val = vcpu->arch.shared->sprg3;
				break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
				spr_val = kvmppc_get_dec(vcpu, get_tb());
				break;
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn,
								     &spr_val);
				if (unlikely(emulated == EMULATE_FAIL)) {
					printk(KERN_INFO "mfspr: unknown spr "
						"0x%x\n", sprn);
				}
				break;
			}
			kvmppc_set_gpr(vcpu, rt, spr_val);
			kvmppc_set_exit_type(vcpu, EMULATED_MFSPR_EXITS);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_MTSPR:
			spr_val = kvmppc_get_gpr(vcpu, rs);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.shared->srr0 = spr_val;
				break;
			case SPRN_SRR1:
				vcpu->arch.shared->srr1 = spr_val;
				break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = spr_val;
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.shared->sprg0 = spr_val;
				break;
			case SPRN_SPRG1:
				vcpu->arch.shared->sprg1 = spr_val;
				break;
			case SPRN_SPRG2:
				vcpu->arch.shared->sprg2 = spr_val;
				break;
			case SPRN_SPRG3:
				vcpu->arch.shared->sprg3 = spr_val;
				break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn,
								     spr_val);
				if (emulated == EMULATE_FAIL)
					printk(KERN_INFO "mtspr: unknown spr "
						"0x%x\n", sprn);
				break;
			}
			kvmppc_set_exit_type(vcpu, EMULATED_MTSPR_EXITS);
			break;

		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
Example no. 19
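/* Generic BookE privileged-instruction emulation: rfi/rfci, mfmsr/mtmsr and
 * wrtee/wrteei. */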
int kvmppc_booke_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                            unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int rs = get_rs(inst);
	int rt = get_rt(inst);

	switch (get_op(inst)) {
	case 19:
		switch (get_xop(inst)) {
		case OP_19_XOP_RFI:
			kvmppc_emul_rfi(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFI_EXITS);
			*advance = 0;
			break;

		case OP_19_XOP_RFCI:
			kvmppc_emul_rfci(vcpu);
			kvmppc_set_exit_type(vcpu, EMULATED_RFCI_EXITS);
			*advance = 0;
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_MFMSR:
			kvmppc_set_gpr(vcpu, rt, vcpu->arch.shared->msr);
			kvmppc_set_exit_type(vcpu, EMULATED_MFMSR_EXITS);
			break;

		case OP_31_XOP_MTMSR:
			kvmppc_set_exit_type(vcpu, EMULATED_MTMSR_EXITS);
			kvmppc_set_msr(vcpu, kvmppc_get_gpr(vcpu, rs));
			break;

		case OP_31_XOP_WRTEE:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
					| (kvmppc_get_gpr(vcpu, rs) & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		case OP_31_XOP_WRTEEI:
			vcpu->arch.shared->msr = (vcpu->arch.shared->msr & ~MSR_EE)
							 | (inst & MSR_EE);
			kvmppc_set_exit_type(vcpu, EMULATED_WRTEE_EXITS);
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
Example no. 20
int kvmppc_kvm_pv(struct kvm_vcpu *vcpu)
{
	int nr = kvmppc_get_gpr(vcpu, 11);
	int r;
	unsigned long __maybe_unused param1 = kvmppc_get_gpr(vcpu, 3);
	unsigned long __maybe_unused param2 = kvmppc_get_gpr(vcpu, 4);
	unsigned long __maybe_unused param3 = kvmppc_get_gpr(vcpu, 5);
	unsigned long __maybe_unused param4 = kvmppc_get_gpr(vcpu, 6);
	unsigned long r2 = 0;

	if (!(kvmppc_get_msr(vcpu) & MSR_SF)) {
		/* 32 bit mode */
		param1 &= 0xffffffff;
		param2 &= 0xffffffff;
		param3 &= 0xffffffff;
		param4 &= 0xffffffff;
	}

	switch (nr) {
	case KVM_HCALL_TOKEN(KVM_HC_PPC_MAP_MAGIC_PAGE):
	{
#if defined(CONFIG_PPC_BOOK3S_64) && defined(CONFIG_KVM_BOOK3S_PR_POSSIBLE)
		/* Book3S can be little endian, find it out here */
		int shared_big_endian = true;
		if (vcpu->arch.intr_msr & MSR_LE)
			shared_big_endian = false;
		if (shared_big_endian != vcpu->arch.shared_big_endian)
			kvmppc_swab_shared(vcpu);
		vcpu->arch.shared_big_endian = shared_big_endian;
#endif

		if (!(param2 & MAGIC_PAGE_FLAG_NOT_MAPPED_NX)) {
			/*
			 * Older versions of the Linux magic page code had
			 * a bug where they would map their trampoline code
			 * NX. If that's the case, remove !PR NX capability.
			 */
			vcpu->arch.disable_kernel_nx = true;
			kvm_make_request(KVM_REQ_TLB_FLUSH, vcpu);
		}

		vcpu->arch.magic_page_pa = param1 & ~0xfffULL;
		vcpu->arch.magic_page_ea = param2 & ~0xfffULL;

#ifdef CONFIG_PPC_64K_PAGES
		/*
		 * Make sure our 4k magic page is in the same window of a 64k
		 * page within the guest and within the host's page.
		 */
		if ((vcpu->arch.magic_page_pa & 0xf000) !=
		    ((ulong)vcpu->arch.shared & 0xf000)) {
			void *old_shared = vcpu->arch.shared;
			ulong shared = (ulong)vcpu->arch.shared;
			void *new_shared;

			shared &= PAGE_MASK;
			shared |= vcpu->arch.magic_page_pa & 0xf000;
			new_shared = (void*)shared;
			memcpy(new_shared, old_shared, 0x1000);
			vcpu->arch.shared = new_shared;
		}
#endif

		r2 = KVM_MAGIC_FEAT_SR | KVM_MAGIC_FEAT_MAS0_TO_SPRG7;

		r = EV_SUCCESS;
		break;
	}
	case KVM_HCALL_TOKEN(KVM_HC_FEATURES):
		r = EV_SUCCESS;
#if defined(CONFIG_PPC_BOOK3S) || defined(CONFIG_KVM_E500V2)
		r2 |= (1 << KVM_FEATURE_MAGIC_PAGE);
#endif

		/* Second return value is in r4 */
		break;
	case EV_HCALL_TOKEN(EV_IDLE):
		r = EV_SUCCESS;
		kvm_vcpu_block(vcpu);
		clear_bit(KVM_REQ_UNHALT, &vcpu->requests);
		break;
	default:
		r = EV_UNIMPLEMENTED;
		break;
	}

	kvmppc_set_gpr(vcpu, 4, r2);

	return r;
}
Example no. 21
/* XXX Should probably auto-generate instruction decoding for a particular core
 * from opcode tables in the future. */
int kvmppc_emulate_instruction(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	u32 ea;
	int ra;
	int rb;
	int rs;
	int rt;
	int sprn;
	enum emulation_result emulated = EMULATE_DONE;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	pr_debug("Emulating opcode %d / %d\n", get_op(inst), get_xop(inst));

	switch (get_op(inst)) {
	case OP_TRAP:
#ifdef CONFIG_PPC_BOOK3S
	case OP_TRAP_64:
		kvmppc_core_queue_program(vcpu, SRR1_PROGTRAP);
#else
		kvmppc_core_queue_program(vcpu, vcpu->arch.esr | ESR_PTR);
#endif
		advance = 0;
		break;

	case 31:
		switch (get_xop(inst)) {

		case OP_31_XOP_LWZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_STWX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			rs = get_rs(inst);
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_LHAX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MFSPR:
			sprn = get_sprn(inst);
			rt = get_rt(inst);

			switch (sprn) {
			case SPRN_SRR0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr0); break;
			case SPRN_SRR1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.srr1); break;
			case SPRN_PVR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.pvr); break;
			case SPRN_PIR:
				kvmppc_set_gpr(vcpu, rt, vcpu->vcpu_id); break;
			case SPRN_MSSSR0:
				kvmppc_set_gpr(vcpu, rt, 0); break;

			/* Note: mftb and TBRL/TBWL are user-accessible, so
			 * the guest can always access the real TB anyways.
			 * In fact, we probably will never see these traps. */
			case SPRN_TBWL:
				kvmppc_set_gpr(vcpu, rt, get_tb() >> 32); break;
			case SPRN_TBWU:
				kvmppc_set_gpr(vcpu, rt, get_tb()); break;

			case SPRN_SPRG0:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg0); break;
			case SPRN_SPRG1:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg1); break;
			case SPRN_SPRG2:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg2); break;
			case SPRN_SPRG3:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.sprg3); break;
			/* Note: SPRG4-7 are user-readable, so we don't get
			 * a trap. */

			case SPRN_DEC:
			{
				u64 jd = get_tb() - vcpu->arch.dec_jiffies;
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.dec - jd);
				pr_debug("mfDEC: %x - %llx = %lx\n",
					 vcpu->arch.dec, jd,
					 kvmppc_get_gpr(vcpu, rt));
				break;
			}
			default:
				emulated = kvmppc_core_emulate_mfspr(vcpu, sprn, rt);
				if (emulated == EMULATE_FAIL) {
					printk("mfspr: unknown spr %x\n", sprn);
					kvmppc_set_gpr(vcpu, rt, 0);
				}
				break;
			}
			break;

		case OP_31_XOP_STHX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			ea = kvmppc_get_gpr(vcpu, rb);
			if (ra)
				ea += kvmppc_get_gpr(vcpu, ra);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, ea);
			break;

		case OP_31_XOP_MTSPR:
			sprn = get_sprn(inst);
			rs = get_rs(inst);
			switch (sprn) {
			case SPRN_SRR0:
				vcpu->arch.srr0 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SRR1:
				vcpu->arch.srr1 = kvmppc_get_gpr(vcpu, rs); break;

			/* XXX We need to context-switch the timebase for
			 * watchdog and FIT. */
			case SPRN_TBWL: break;
			case SPRN_TBWU: break;

			case SPRN_MSSSR0: break;

			case SPRN_DEC:
				vcpu->arch.dec = kvmppc_get_gpr(vcpu, rs);
				kvmppc_emulate_dec(vcpu);
				break;

			case SPRN_SPRG0:
				vcpu->arch.sprg0 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG1:
				vcpu->arch.sprg1 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG2:
				vcpu->arch.sprg2 = kvmppc_get_gpr(vcpu, rs); break;
			case SPRN_SPRG3:
				vcpu->arch.sprg3 = kvmppc_get_gpr(vcpu, rs); break;

			default:
				emulated = kvmppc_core_emulate_mtspr(vcpu, sprn, rs);
				if (emulated == EMULATE_FAIL)
					printk("mtspr: unknown spr %x\n", sprn);
				break;
			}
			break;

		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_TLBSYNC:
			break;

		case OP_31_XOP_STWBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			rt = get_rt(inst);
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			rs = get_rs(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);

			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			/* Attempt core-specific emulation below. */
			emulated = EMULATE_FAIL;
		}
		break;

	case OP_LWZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	case OP_LWZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LBZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STW:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	case OP_STWU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STB:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHZ:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_LHA:
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		ra = get_ra(inst);
		rt = get_rt(inst);
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	case OP_STH:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		ra = get_ra(inst);
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.paddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL) {
		emulated = kvmppc_core_emulate_op(run, vcpu, inst, &advance);
		if (emulated == EMULATE_AGAIN) {
			advance = 0;
		} else if (emulated == EMULATE_FAIL) {
			advance = 0;
			printk(KERN_ERR "Couldn't emulate instruction 0x%08x "
			       "(op %d xop %d)\n", inst, get_op(inst), get_xop(inst));
			kvmppc_core_queue_program(vcpu, 0);
		}
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
Example no. 22
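/* 44x core-specific emulation; CPR0 DCRs are handled in kernel, other DCR
 * accesses are punted to userspace. */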
int kvmppc_core_emulate_op(struct kvm_run *run, struct kvm_vcpu *vcpu,
                           unsigned int inst, int *advance)
{
	int emulated = EMULATE_DONE;
	int dcrn;
	int ra;
	int rb;
	int rc;
	int rs;
	int rt;
	int ws;

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {

		case XOP_MFDCR:
			dcrn = get_dcrn(inst);
			rt = get_rt(inst);

			/* The guest may access CPR0 registers to determine the timebase
			 * frequency, and it must know the real host frequency because it
			 * can directly access the timebase registers.
			 *
			 * It would be possible to emulate those accesses in userspace,
			 * but userspace can really only figure out the end frequency.
			 * We could decompose that into the factors that compute it, but
			 * that's tricky math, and it's easier to just report the real
			 * CPR0 values.
			 */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				kvmppc_set_gpr(vcpu, rt, vcpu->arch.cpr0_cfgaddr);
				break;
			case DCRN_CPR0_CONFIG_DATA:
				local_irq_disable();
				mtdcr(DCRN_CPR0_CONFIG_ADDR,
					  vcpu->arch.cpr0_cfgaddr);
				kvmppc_set_gpr(vcpu, rt,
					       mfdcr(DCRN_CPR0_CONFIG_DATA));
				local_irq_enable();
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = 0;
				run->dcr.is_write = 0;
				vcpu->arch.io_gpr = rt;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
				emulated = EMULATE_DO_DCR;
			}

			break;

		case XOP_MTDCR:
			dcrn = get_dcrn(inst);
			rs = get_rs(inst);

			/* emulate some access in kernel */
			switch (dcrn) {
			case DCRN_CPR0_CONFIG_ADDR:
				vcpu->arch.cpr0_cfgaddr = kvmppc_get_gpr(vcpu, rs);
				break;
			default:
				run->dcr.dcrn = dcrn;
				run->dcr.data = kvmppc_get_gpr(vcpu, rs);
				run->dcr.is_write = 1;
				vcpu->arch.dcr_needed = 1;
				kvmppc_account_exit(vcpu, DCR_EXITS);
				emulated = EMULATE_DO_DCR;
			}

			break;

		case XOP_TLBWE:
			ra = get_ra(inst);
			rs = get_rs(inst);
			ws = get_ws(inst);
			emulated = kvmppc_44x_emul_tlbwe(vcpu, ra, rs, ws);
			break;

		case XOP_TLBSX:
			rt = get_rt(inst);
			ra = get_ra(inst);
			rb = get_rb(inst);
			rc = get_rc(inst);
			emulated = kvmppc_44x_emul_tlbsx(vcpu, rt, ra, rb, rc);
			break;

		case XOP_ICCCI:
			break;

		default:
			emulated = EMULATE_FAIL;
		}

		break;

	default:
		emulated = EMULATE_FAIL;
	}

	if (emulated == EMULATE_FAIL)
		emulated = kvmppc_booke_emulate_op(run, vcpu, inst, advance);

	return emulated;
}
Example no. 23
/* Deliver the interrupt of the corresponding priority, if possible. */
static int kvmppc_booke_irqprio_deliver(struct kvm_vcpu *vcpu,
                                        unsigned int priority)
{
	int allowed = 0;
	ulong msr_mask = 0;
	bool update_esr = false, update_dear = false, update_epr = false;
	ulong crit_raw = vcpu->arch.shared->critical;
	ulong crit_r1 = kvmppc_get_gpr(vcpu, 1);
	bool crit;
	bool keep_irq = false;
	enum int_class int_class;
	ulong new_msr = vcpu->arch.shared->msr;

	/* Truncate crit indicators in 32 bit mode */
	if (!(vcpu->arch.shared->msr & MSR_SF)) {
		crit_raw &= 0xffffffff;
		crit_r1 &= 0xffffffff;
	}

	/* Critical section when crit == r1 */
	crit = (crit_raw == crit_r1);
	/* ... and we're in supervisor mode */
	crit = crit && !(vcpu->arch.shared->msr & MSR_PR);

	if (priority == BOOKE_IRQPRIO_EXTERNAL_LEVEL) {
		priority = BOOKE_IRQPRIO_EXTERNAL;
		keep_irq = true;
	}

	if ((priority == BOOKE_IRQPRIO_EXTERNAL) && vcpu->arch.epr_flags)
		update_epr = true;

	switch (priority) {
	case BOOKE_IRQPRIO_DTLB_MISS:
	case BOOKE_IRQPRIO_DATA_STORAGE:
	case BOOKE_IRQPRIO_ALIGNMENT:
		update_dear = true;
		/* fall through */
	case BOOKE_IRQPRIO_INST_STORAGE:
	case BOOKE_IRQPRIO_PROGRAM:
		update_esr = true;
		/* fall through */
	case BOOKE_IRQPRIO_ITLB_MISS:
	case BOOKE_IRQPRIO_SYSCALL:
	case BOOKE_IRQPRIO_FP_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_UNAVAIL:
	case BOOKE_IRQPRIO_SPE_FP_DATA:
	case BOOKE_IRQPRIO_SPE_FP_ROUND:
	case BOOKE_IRQPRIO_AP_UNAVAIL:
		allowed = 1;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_WATCHDOG:
	case BOOKE_IRQPRIO_CRITICAL:
	case BOOKE_IRQPRIO_DBELL_CRIT:
		allowed = vcpu->arch.shared->msr & MSR_CE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	case BOOKE_IRQPRIO_MACHINE_CHECK:
		allowed = vcpu->arch.shared->msr & MSR_ME;
		allowed = allowed && !crit;
		int_class = INT_CLASS_MC;
		break;
	case BOOKE_IRQPRIO_DECREMENTER:
	case BOOKE_IRQPRIO_FIT:
		keep_irq = true;
		/* fall through */
	case BOOKE_IRQPRIO_EXTERNAL:
	case BOOKE_IRQPRIO_DBELL:
		allowed = vcpu->arch.shared->msr & MSR_EE;
		allowed = allowed && !crit;
		msr_mask = MSR_CE | MSR_ME | MSR_DE;
		int_class = INT_CLASS_NONCRIT;
		break;
	case BOOKE_IRQPRIO_DEBUG:
		allowed = vcpu->arch.shared->msr & MSR_DE;
		allowed = allowed && !crit;
		msr_mask = MSR_ME;
		int_class = INT_CLASS_CRIT;
		break;
	}

	if (allowed) {
		switch (int_class) {
		case INT_CLASS_NONCRIT:
			set_guest_srr(vcpu, vcpu->arch.pc,
				      vcpu->arch.shared->msr);
			break;
		case INT_CLASS_CRIT:
			set_guest_csrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_DBG:
			set_guest_dsrr(vcpu, vcpu->arch.pc,
				       vcpu->arch.shared->msr);
			break;
		case INT_CLASS_MC:
			set_guest_mcsrr(vcpu, vcpu->arch.pc,
					vcpu->arch.shared->msr);
			break;
		}

		vcpu->arch.pc = vcpu->arch.ivpr | vcpu->arch.ivor[priority];
		if (update_esr == true)
			set_guest_esr(vcpu, vcpu->arch.queued_esr);
		if (update_dear == true)
			set_guest_dear(vcpu, vcpu->arch.queued_dear);
		if (update_epr == true) {
			if (vcpu->arch.epr_flags & KVMPPC_EPR_USER)
				kvm_make_request(KVM_REQ_EPR_EXIT, vcpu);
			else if (vcpu->arch.epr_flags & KVMPPC_EPR_KERNEL) {
				BUG_ON(vcpu->arch.irq_type != KVMPPC_IRQ_MPIC);
				kvmppc_mpic_set_epr(vcpu);
			}
		}

		new_msr &= msr_mask;
#if defined(CONFIG_64BIT)
		if (vcpu->arch.epcr & SPRN_EPCR_ICM)
			new_msr |= MSR_CM;
#endif
		kvmppc_set_msr(vcpu, new_msr);

		if (!keep_irq)
			clear_bit(priority, &vcpu->arch.pending_exceptions);
	}

#ifdef CONFIG_KVM_BOOKE_HV
	/*
	 * If an interrupt is pending but masked, raise a guest doorbell
	 * so that we are notified when the guest enables the relevant
	 * MSR bit.
	 */
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_EE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_NONCRIT);
	if (vcpu->arch.pending_exceptions & BOOKE_IRQMASK_CE)
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_CRIT);
	if (vcpu->arch.pending_exceptions & (1 << BOOKE_IRQPRIO_MACHINE_CHECK))
		kvmppc_set_pending_interrupt(vcpu, INT_CLASS_MC);
#endif

	return allowed;
}
Example no. 24
int kvmppc_booke_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	int emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_DEAR:
		vcpu->arch.shared->dar = spr_val; break;
	case SPRN_ESR:
		vcpu->arch.shared->esr = spr_val; break;
	case SPRN_DBCR0:
		vcpu->arch.dbcr0 = spr_val; break;
	case SPRN_DBCR1:
		vcpu->arch.dbcr1 = spr_val; break;
	case SPRN_DBSR:
		vcpu->arch.dbsr &= ~spr_val; break;
	case SPRN_TSR:
		kvmppc_clr_tsr_bits(vcpu, spr_val);
		break;
	case SPRN_TCR:
		kvmppc_set_tcr(vcpu, spr_val);
		break;

	/* Note: SPRG4-7 are user-readable. These values are
	 * loaded into the real SPRGs when resuming the
	 * guest. */
	case SPRN_SPRG4:
		vcpu->arch.shared->sprg4 = spr_val; break;
	case SPRN_SPRG5:
		vcpu->arch.shared->sprg5 = spr_val; break;
	case SPRN_SPRG6:
		vcpu->arch.shared->sprg6 = spr_val; break;
	case SPRN_SPRG7:
		vcpu->arch.shared->sprg7 = spr_val; break;

	case SPRN_IVPR:
		vcpu->arch.ivpr = spr_val;
		break;
	case SPRN_IVOR0:
		vcpu->arch.ivor[BOOKE_IRQPRIO_CRITICAL] = spr_val;
		break;
	case SPRN_IVOR1:
		vcpu->arch.ivor[BOOKE_IRQPRIO_MACHINE_CHECK] = spr_val;
		break;
	case SPRN_IVOR2:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DATA_STORAGE] = spr_val;
		break;
	case SPRN_IVOR3:
		vcpu->arch.ivor[BOOKE_IRQPRIO_INST_STORAGE] = spr_val;
		break;
	case SPRN_IVOR4:
		vcpu->arch.ivor[BOOKE_IRQPRIO_EXTERNAL] = spr_val;
		break;
	case SPRN_IVOR5:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ALIGNMENT] = spr_val;
		break;
	case SPRN_IVOR6:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PROGRAM] = spr_val;
		break;
	case SPRN_IVOR7:
		vcpu->arch.ivor[BOOKE_IRQPRIO_FP_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR8:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SYSCALL] = spr_val;
		break;
	case SPRN_IVOR9:
		vcpu->arch.ivor[BOOKE_IRQPRIO_AP_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR10:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DECREMENTER] = spr_val;
		break;
	case SPRN_IVOR11:
		vcpu->arch.ivor[BOOKE_IRQPRIO_FIT] = spr_val;
		break;
	case SPRN_IVOR12:
		vcpu->arch.ivor[BOOKE_IRQPRIO_WATCHDOG] = spr_val;
		break;
	case SPRN_IVOR13:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DTLB_MISS] = spr_val;
		break;
	case SPRN_IVOR14:
		vcpu->arch.ivor[BOOKE_IRQPRIO_ITLB_MISS] = spr_val;
		break;
	case SPRN_IVOR15:
		vcpu->arch.ivor[BOOKE_IRQPRIO_DEBUG] = spr_val;
		break;

	default:
		emulated = EMULATE_FAIL;
	}

	return emulated;
}
Example no. 25
int kvmppc_core_emulate_mtspr(struct kvm_vcpu *vcpu, int sprn, int rs)
{
	struct kvmppc_vcpu_e500 *vcpu_e500 = to_e500(vcpu);
	int emulated = EMULATE_DONE;
	ulong spr_val = kvmppc_get_gpr(vcpu, rs);

	switch (sprn) {
	case SPRN_PID:
		kvmppc_set_pid(vcpu, spr_val);
		break;
	case SPRN_PID1:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[1] = spr_val; break;
	case SPRN_PID2:
		if (spr_val != 0)
			return EMULATE_FAIL;
		vcpu_e500->pid[2] = spr_val; break;
	case SPRN_MAS0:
		vcpu->arch.shared->mas0 = spr_val; break;
	case SPRN_MAS1:
		vcpu->arch.shared->mas1 = spr_val; break;
	case SPRN_MAS2:
		vcpu->arch.shared->mas2 = spr_val; break;
	case SPRN_MAS3:
		vcpu->arch.shared->mas7_3 &= ~(u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= spr_val;
		break;
	case SPRN_MAS4:
		vcpu->arch.shared->mas4 = spr_val; break;
	case SPRN_MAS6:
		vcpu->arch.shared->mas6 = spr_val; break;
	case SPRN_MAS7:
		vcpu->arch.shared->mas7_3 &= (u64)0xffffffff;
		vcpu->arch.shared->mas7_3 |= (u64)spr_val << 32;
		break;
	case SPRN_L1CSR0:
		vcpu_e500->l1csr0 = spr_val;
		vcpu_e500->l1csr0 &= ~(L1CSR0_DCFI | L1CSR0_CLFC);
		break;
	case SPRN_L1CSR1:
		vcpu_e500->l1csr1 = spr_val; break;
	case SPRN_HID0:
		vcpu_e500->hid0 = spr_val; break;
	case SPRN_HID1:
		vcpu_e500->hid1 = spr_val; break;

	case SPRN_MMUCSR0:
		emulated = kvmppc_e500_emul_mt_mmucsr0(vcpu_e500,
				spr_val);
		break;

	/* extra exceptions */
	case SPRN_IVOR32:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_UNAVAIL] = spr_val;
		break;
	case SPRN_IVOR33:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_DATA] = spr_val;
		break;
	case SPRN_IVOR34:
		vcpu->arch.ivor[BOOKE_IRQPRIO_SPE_FP_ROUND] = spr_val;
		break;
	case SPRN_IVOR35:
		vcpu->arch.ivor[BOOKE_IRQPRIO_PERFORMANCE_MONITOR] = spr_val;
		break;

	default:
		emulated = kvmppc_booke_emulate_mtspr(vcpu, sprn, rs);
	}

	return emulated;
}
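
Note how MAS3 and MAS7 above are kept as a single 64-bit mas7_3 field, MAS3 in the low word and MAS7 (the physical-address bits above 4 GiB) in the high word, so each mtspr rewrites only its own half. A standalone sketch of the packing; the helper names and sample values are invented for illustration:

#include <stdint.h>
#include <stdio.h>

static void set_mas3(uint64_t *mas7_3, uint32_t val)
{
	/* replace the low word, keep MAS7 */
	*mas7_3 = (*mas7_3 & ~(uint64_t)0xffffffff) | val;
}

static void set_mas7(uint64_t *mas7_3, uint32_t val)
{
	/* replace the high word, keep MAS3 */
	*mas7_3 = (*mas7_3 & (uint64_t)0xffffffff) | ((uint64_t)val << 32);
}

int main(void)
{
	uint64_t mas7_3 = 0;

	set_mas3(&mas7_3, 0x12340015);	/* RPN low bits plus permission bits */
	set_mas7(&mas7_3, 0xf);		/* RPN bits above 4 GiB */
	printf("mas7_3 = %#llx\n", (unsigned long long)mas7_3);
	return 0;
}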
Example #26
int kvmppc_emulate_paired_single(struct kvm_run *run, struct kvm_vcpu *vcpu)
{
	u32 inst = kvmppc_get_last_inst(vcpu);
	enum emulation_result emulated = EMULATE_DONE;

	int ax_rd = inst_get_field(inst, 6, 10);
	int ax_ra = inst_get_field(inst, 11, 15);
	int ax_rb = inst_get_field(inst, 16, 20);
	int ax_rc = inst_get_field(inst, 21, 25);
	short full_d = inst_get_field(inst, 16, 31);

	u64 *fpr_d = &vcpu->arch.fpr[ax_rd];
	u64 *fpr_a = &vcpu->arch.fpr[ax_ra];
	u64 *fpr_b = &vcpu->arch.fpr[ax_rb];
	u64 *fpr_c = &vcpu->arch.fpr[ax_rc];

	bool rcomp = (inst & 1) ? true : false;
	u32 cr = kvmppc_get_cr(vcpu);
#ifdef DEBUG
	int i;
#endif

	if (!kvmppc_inst_is_paired_single(vcpu, inst))
		return EMULATE_FAIL;

	if (!(vcpu->arch.shared->msr & MSR_FP)) {
		kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_FP_UNAVAIL);
		return EMULATE_AGAIN;
	}

	kvmppc_giveup_ext(vcpu, MSR_FP);
	preempt_disable();
	enable_kernel_fp();
	/* Do we need to clear FE0 / FE1 here? Don't think so. */

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
		u32 f;
		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x / 0x%llx    QPR[%d] = 0x%x\n",
			i, f, vcpu->arch.fpr[i], i, vcpu->arch.qpr[i]);
	}
#endif

	switch (get_op(inst)) {
	case OP_PSQ_L:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_LU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_PSQ_ST:
	{
		ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
		break;
	}
	case OP_PSQ_STU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
		bool w = inst_get_field(inst, 16, 16) ? true : false;
		int i = inst_get_field(inst, 17, 19);

		addr += get_d_signext(inst);
		emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 4:
		/* X form */
		switch (inst_get_field(inst, 21, 30)) {
		case OP_4X_PS_CMPU0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4X_PS_CMPO0:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PSQ_LUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_load(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_4X_PS_NEG:
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] ^= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] ^= 0x80000000;
			break;
		case OP_4X_PS_CMPU1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_MR:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_CMPO1:
			/* XXX */
			emulated = EMULATE_FAIL;
			break;
		case OP_4X_PS_NABS:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] |= 0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] |= 0x80000000;
			break;
		case OP_4X_PS_ABS:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rb];
			vcpu->arch.fpr[ax_rd] &= ~0x8000000000000000ULL;
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			vcpu->arch.qpr[ax_rd] &= ~0x80000000;
			break;
		case OP_4X_PS_MERGE00:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE01:
			WARN_ON(rcomp);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_ra];
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		case OP_4X_PS_MERGE10:
			WARN_ON(rcomp);
			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &vcpu->arch.fpr[ax_rd]);
			/* vcpu->arch.qpr[ax_rd] = vcpu->arch.fpr[ax_rb]; */
			kvm_cvt_df(&vcpu->arch.fpr[ax_rb],
				   &vcpu->arch.qpr[ax_rd]);
			break;
		case OP_4X_PS_MERGE11:
			WARN_ON(rcomp);
			/* vcpu->arch.fpr[ax_rd] = vcpu->arch.qpr[ax_ra]; */
			kvm_cvt_fd(&vcpu->arch.qpr[ax_ra],
				   &vcpu->arch.fpr[ax_rd]);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rb];
			break;
		}
		/* XW form */
		switch (inst_get_field(inst, 25, 30)) {
		case OP_4XW_PSQ_STX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);
			break;
		}
		case OP_4XW_PSQ_STUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra);
			bool w = inst_get_field(inst, 21, 21) ? true : false;
			int i = inst_get_field(inst, 22, 24);

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_psq_store(run, vcpu, ax_rd, addr, w, i);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		}
		/* A form */
		switch (inst_get_field(inst, 26, 30)) {
		case OP_4A_PS_SUM1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_rb, ax_ra, SCALAR_NO_PS0 | SCALAR_HIGH, fps_fadds);
			vcpu->arch.fpr[ax_rd] = vcpu->arch.fpr[ax_rc];
			break;
		case OP_4A_PS_SUM0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NO_PS1 | SCALAR_LOW, fps_fadds);
			vcpu->arch.qpr[ax_rd] = vcpu->arch.qpr[ax_rc];
			break;
		case OP_4A_PS_MULS0:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_HIGH, fps_fmuls);
			break;
		case OP_4A_PS_MULS1:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_LOW, fps_fmuls);
			break;
		case OP_4A_PS_MADDS0:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_HIGH, fps_fmadds);
			break;
		case OP_4A_PS_MADDS1:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_LOW, fps_fmadds);
			break;
		case OP_4A_PS_DIV:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fdivs);
			break;
		case OP_4A_PS_SUB:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fsubs);
			break;
		case OP_4A_PS_ADD:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rb, SCALAR_NONE, fps_fadds);
			break;
		case OP_4A_PS_SEL:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fsel);
			break;
		case OP_4A_PS_RES:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_fres);
			break;
		case OP_4A_PS_MUL:
			emulated = kvmppc_ps_two_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, SCALAR_NONE, fps_fmuls);
			break;
		case OP_4A_PS_RSQRTE:
			emulated = kvmppc_ps_one_in(vcpu, rcomp, ax_rd,
					ax_rb, fps_frsqrte);
			break;
		case OP_4A_PS_MSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmsubs);
			break;
		case OP_4A_PS_MADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fmadds);
			break;
		case OP_4A_PS_NMSUB:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmsubs);
			break;
		case OP_4A_PS_NMADD:
			emulated = kvmppc_ps_three_in(vcpu, rcomp, ax_rd,
					ax_ra, ax_rc, ax_rb, SCALAR_NONE, fps_fnmadds);
			break;
		}
		break;

	/* Real FPU operations */

	case OP_LFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);
		break;
	}
	case OP_LFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_LFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);
		break;
	}
	case OP_LFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd, addr,
						   FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFS:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);
		break;
	}
	case OP_STFSU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_SINGLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case OP_STFD:
	{
		ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);
		break;
	}
	case OP_STFDU:
	{
		ulong addr = kvmppc_get_gpr(vcpu, ax_ra) + full_d;

		emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd, addr,
						    FPU_LS_DOUBLE);

		if (emulated == EMULATE_DONE)
			kvmppc_set_gpr(vcpu, ax_ra, addr);
		break;
	}
	case 31:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_31_LFSX:
		{
			ulong addr = ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0;

			addr += kvmppc_get_gpr(vcpu, ax_rb);
			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_LFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_LFDX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_LFDUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_load(run, vcpu, ax_rd,
							   addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFSX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);
			break;
		}
		case OP_31_STFSUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_SINGLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);
			break;
		}
		case OP_31_STFUX:
		{
			ulong addr = kvmppc_get_gpr(vcpu, ax_ra) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr, FPU_LS_DOUBLE);

			if (emulated == EMULATE_DONE)
				kvmppc_set_gpr(vcpu, ax_ra, addr);
			break;
		}
		case OP_31_STFIWX:
		{
			ulong addr = (ax_ra ? kvmppc_get_gpr(vcpu, ax_ra) : 0) +
				     kvmppc_get_gpr(vcpu, ax_rb);

			emulated = kvmppc_emulate_fpr_store(run, vcpu, ax_rd,
							    addr,
							    FPU_LS_SINGLE_LOW);
			break;
		}
		}
		break;
	case 59:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_59_FADDS:
			fpd_fadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FSUBS:
			fpd_fsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FDIVS:
			fpd_fdivs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRES:
			fpd_fres(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FRSQRTES:
			fpd_frsqrtes(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_59_FMULS:
			fpd_fmuls(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMSUBS:
			fpd_fmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FMADDS:
			fpd_fmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMSUBS:
			fpd_fnmsubs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_59_FNMADDS:
			fpd_fnmadds(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		}
		break;
	case 63:
		switch (inst_get_field(inst, 21, 30)) {
		case OP_63_MTFSB0:
		case OP_63_MTFSB1:
		case OP_63_MCRFS:
		case OP_63_MTFSFI:
			/* XXX need to implement */
			break;
		case OP_63_MFFS:
			/* XXX missing CR */
			*fpr_d = vcpu->arch.fpscr;
			break;
		case OP_63_MTFSF:
			/* XXX missing fm bits */
			/* XXX missing CR */
			vcpu->arch.fpscr = *fpr_b;
			break;
		case OP_63_FCMPU:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpu(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FCMPO:
		{
			u32 tmp_cr;
			u32 cr0_mask = 0xf0000000;
			u32 cr_shift = inst_get_field(inst, 6, 8) * 4;

			fpd_fcmpo(&vcpu->arch.fpscr, &tmp_cr, fpr_a, fpr_b);
			cr &= ~(cr0_mask >> cr_shift);
			cr |= (tmp_cr & cr0_mask) >> cr_shift;
			break;
		}
		case OP_63_FNEG:
			fpd_fneg(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FMR:
			*fpr_d = *fpr_b;
			break;
		case OP_63_FABS:
			fpd_fabs(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCPSGN:
			fpd_fcpsgn(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FDIV:
			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FADD:
			fpd_fadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FSUB:
			fpd_fsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_b);
			break;
		case OP_63_FCTIW:
			fpd_fctiw(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FCTIWZ:
			fpd_fctiwz(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			break;
		case OP_63_FRSP:
			fpd_frsp(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			kvmppc_sync_qpr(vcpu, ax_rd);
			break;
		case OP_63_FRSQRTE:
		{
			double one = 1.0f;

			/* fD = sqrt(fB) */
			fpd_fsqrt(&vcpu->arch.fpscr, &cr, fpr_d, fpr_b);
			/* fD = 1.0f / fD */
			fpd_fdiv(&vcpu->arch.fpscr, &cr, fpr_d, (u64*)&one, fpr_d);
			break;
		}
		}
		switch (inst_get_field(inst, 26, 30)) {
		case OP_63_FMUL:
			fpd_fmul(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c);
			break;
		case OP_63_FSEL:
			fpd_fsel(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMSUB:
			fpd_fmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FMADD:
			fpd_fmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMSUB:
			fpd_fnmsub(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		case OP_63_FNMADD:
			fpd_fnmadd(&vcpu->arch.fpscr, &cr, fpr_d, fpr_a, fpr_c, fpr_b);
			break;
		}
		break;
	}

#ifdef DEBUG
	for (i = 0; i < ARRAY_SIZE(vcpu->arch.fpr); i++) {
		u32 f;
		kvm_cvt_df(&vcpu->arch.fpr[i], &f);
		dprintk(KERN_INFO "FPR[%d] = 0x%x\n", i, f);
	}
#endif

	if (rcomp)
		kvmppc_set_cr(vcpu, cr);

	preempt_enable();

	return emulated;
}
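
fcmpu and fcmpo write a 4-bit condition-register field selected by crfD (instruction bits 6-8), which is why the emulation above clears 0xf0000000 >> (4 * crfD) and splices in the nibble produced by the compare; as fixed above, the inserted bits must come from tmp_cr, the CR image written by the compare, not from the stale cr. A minimal standalone sketch of the splice, with the helper name invented for illustration:

#include <stdint.h>
#include <stdio.h>

/* Replace CR field crfd (0 = most significant nibble) with the CR0
 * nibble of the freshly computed CR image. */
static uint32_t splice_cr_field(uint32_t cr, uint32_t tmp_cr, int crfd)
{
	uint32_t cr0_mask = 0xf0000000u;
	int cr_shift = crfd * 4;

	cr &= ~(cr0_mask >> cr_shift);
	cr |= (tmp_cr & cr0_mask) >> cr_shift;
	return cr;
}

int main(void)
{
	/* 0x4 in CR0 means "greater than"; move it into CR field 2 */
	printf("cr = %#x\n", splice_cr_field(0x12345678, 0x40000000, 2));
	return 0;	/* prints cr = 0x12445678 */
}

Example #27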
/* XXX to do:
 * lhax
 * lhaux
 * lswx
 * lswi
 * stswx
 * stswi
 * lha
 * lhau
 * lmw
 * stmw
 *
 */
int kvmppc_emulate_loadstore(struct kvm_vcpu *vcpu)
{
	struct kvm_run *run = vcpu->run;
	u32 inst;
	int ra, rs, rt;
	enum emulation_result emulated;
	int advance = 1;

	/* this default type might be overwritten by subcategories */
	kvmppc_set_exit_type(vcpu, EMULATED_INST_EXITS);

	emulated = kvmppc_get_last_inst(vcpu, false, &inst);
	if (emulated != EMULATE_DONE)
		return emulated;

	ra = get_ra(inst);
	rs = get_rs(inst);
	rt = get_rt(inst);

	switch (get_op(inst)) {
	case 31:
		switch (get_xop(inst)) {
		case OP_31_XOP_LWZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
			break;

		case OP_31_XOP_LBZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			break;

		case OP_31_XOP_LBZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STWX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 1);
			break;

		case OP_31_XOP_STBX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			break;

		case OP_31_XOP_STBUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               1, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_LHAX:
			emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			break;

		case OP_31_XOP_LHZUX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_STHX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			break;

		case OP_31_XOP_STHUX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 1);
			kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
			break;

		case OP_31_XOP_DCBST:
		case OP_31_XOP_DCBF:
		case OP_31_XOP_DCBI:
			/* Do nothing. The guest is performing dcbi because
			 * hardware DMA is not snooped by the dcache, but
			 * emulated DMA either goes through the dcache as
			 * normal writes, or the host kernel has handled dcache
			 * coherence. */
			break;

		case OP_31_XOP_LWBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 4, 0);
			break;

		case OP_31_XOP_STWBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               4, 0);
			break;

		case OP_31_XOP_LHBRX:
			emulated = kvmppc_handle_load(run, vcpu, rt, 2, 0);
			break;

		case OP_31_XOP_STHBRX:
			emulated = kvmppc_handle_store(run, vcpu,
						       kvmppc_get_gpr(vcpu, rs),
			                               2, 0);
			break;

		default:
			emulated = EMULATE_FAIL;
			break;
		}
		break;

	case OP_LWZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		break;

	/* TBD: Add support for other 64 bit load variants like ldu, ldux, ldx etc. */
	case OP_LD:
		rt = get_rt(inst);
		emulated = kvmppc_handle_load(run, vcpu, rt, 8, 1);
		break;

	case OP_LWZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LBZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		break;

	case OP_LBZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STW:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		break;

	/* TBD: Add support for other 64 bit store variants like stdu, stdux, stdx etc. */
	case OP_STD:
		rs = get_rs(inst);
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               8, 1);
		break;

	case OP_STWU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               4, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STB:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		break;

	case OP_STBU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               1, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHZ:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		break;

	case OP_LHZU:
		emulated = kvmppc_handle_load(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_LHA:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		break;

	case OP_LHAU:
		emulated = kvmppc_handle_loads(run, vcpu, rt, 2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	case OP_STH:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		break;

	case OP_STHU:
		emulated = kvmppc_handle_store(run, vcpu,
					       kvmppc_get_gpr(vcpu, rs),
		                               2, 1);
		kvmppc_set_gpr(vcpu, ra, vcpu->arch.vaddr_accessed);
		break;

	default:
		emulated = EMULATE_FAIL;
		break;
	}

	if (emulated == EMULATE_FAIL) {
		advance = 0;
		kvmppc_core_queue_program(vcpu, 0);
	}

	trace_kvm_ppc_instr(inst, kvmppc_get_pc(vcpu), emulated);

	/* Advance past emulated instruction. */
	if (advance)
		kvmppc_set_pc(vcpu, kvmppc_get_pc(vcpu) + 4);

	return emulated;
}
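
The get_op/get_rt/get_ra/get_rs helpers used throughout this dispatcher are fixed-field extractions from the 32-bit instruction image (PowerPC numbers bit 0 as the most significant). A sketch of the layout under that assumption; get_d is an invented name for the signed 16-bit displacement of D-form loads and stores:

#include <stdint.h>
#include <stdio.h>

static unsigned int get_op(uint32_t inst) { return inst >> 26; }		/* bits 0-5 */
static unsigned int get_rt(uint32_t inst) { return (inst >> 21) & 0x1f; }	/* bits 6-10 */
static unsigned int get_ra(uint32_t inst) { return (inst >> 16) & 0x1f; }	/* bits 11-15 */
static int get_d(uint32_t inst) { return (int16_t)(inst & 0xffff); }		/* bits 16-31 */

int main(void)
{
	uint32_t inst = 0x80c3fffc;	/* lwz r6,-4(r3) */

	printf("op=%u rt=%u ra=%u d=%d\n",
	       get_op(inst), get_rt(inst), get_ra(inst), get_d(inst));
	return 0;	/* prints op=32 rt=6 ra=3 d=-4 */
}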
Example #28
static int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
			      struct task_struct *tsk)
{
	int r = RESUME_HOST;

	vcpu->stat.sum_exits++;

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;
	switch (vcpu->arch.trap) {
	/* We're good on these - the host merely wanted to get our attention */
	case BOOK3S_INTERRUPT_HV_DECREMENTER:
		vcpu->stat.dec_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_EXTERNAL:
		vcpu->stat.ext_intr_exits++;
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PERFMON:
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_PROGRAM:
	{
		ulong flags;
		/*
		 * Normally program interrupts are delivered directly
		 * to the guest by the hardware, but we can get here
		 * as a result of a hypervisor emulation interrupt
		 * (e40) getting turned into a 700 by BML RTAS.
		 */
		flags = vcpu->arch.shregs.msr & 0x1f0000ull;
		kvmppc_core_queue_program(vcpu, flags);
		r = RESUME_GUEST;
		break;
	}
	case BOOK3S_INTERRUPT_SYSCALL:
	{
		/* hcall - punt to userspace */
		int i;

		if (vcpu->arch.shregs.msr & MSR_PR) {
			/* sc 1 from userspace - reflect to guest syscall */
			kvmppc_book3s_queue_irqprio(vcpu, BOOK3S_INTERRUPT_SYSCALL);
			r = RESUME_GUEST;
			break;
		}
		run->papr_hcall.nr = kvmppc_get_gpr(vcpu, 3);
		for (i = 0; i < 9; ++i)
			run->papr_hcall.args[i] = kvmppc_get_gpr(vcpu, 4 + i);
		run->exit_reason = KVM_EXIT_PAPR_HCALL;
		vcpu->arch.hcall_needed = 1;
		r = RESUME_HOST;
		break;
	}
	/*
	 * We get these next two if the guest does a bad real-mode access,
	 * as we have enabled VRMA (virtualized real mode area) mode in the
	 * LPCR.  We just generate an appropriate DSI/ISI to the guest.
	 */
	case BOOK3S_INTERRUPT_H_DATA_STORAGE:
		vcpu->arch.shregs.dsisr = vcpu->arch.fault_dsisr;
		vcpu->arch.shregs.dar = vcpu->arch.fault_dar;
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_DATA_STORAGE, 0);
		r = RESUME_GUEST;
		break;
	case BOOK3S_INTERRUPT_H_INST_STORAGE:
		kvmppc_inject_interrupt(vcpu, BOOK3S_INTERRUPT_INST_STORAGE,
					0x08000000);
		r = RESUME_GUEST;
		break;
	/*
	 * This occurs if the guest executes an illegal instruction.
	 * We just generate a program interrupt to the guest, since
	 * we don't emulate any guest instructions at this stage.
	 */
	case BOOK3S_INTERRUPT_H_EMUL_ASSIST:
		kvmppc_core_queue_program(vcpu, 0x80000);
		r = RESUME_GUEST;
		break;
	default:
		kvmppc_dump_regs(vcpu);
		printk(KERN_EMERG "trap=0x%x | pc=0x%lx | msr=0x%llx\n",
			vcpu->arch.trap, kvmppc_get_pc(vcpu),
			vcpu->arch.shregs.msr);
		r = RESUME_HOST;
		BUG();
		break;
	}

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(tsk)) {
			vcpu->stat.signal_exits++;
			run->exit_reason = KVM_EXIT_INTR;
			r = -EINTR;
		} else {
			kvmppc_core_deliver_interrupts(vcpu);
		}
	}

	return r;
}
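
On the other side of the KVM_EXIT_PAPR_HCALL punt above, userspace reads the hypercall number and arguments out of the mmap'ed kvm_run structure, stores a result in papr_hcall.ret, and re-enters the guest. A hedged sketch of that loop, assuming vcpu_fd and run were set up by the usual KVM ioctls; the hypercall dispatch itself is elided, and H_SUCCESS (0 in PAPR) stands in for a real return code:

#include <linux/kvm.h>
#include <stdio.h>
#include <sys/ioctl.h>

#define H_SUCCESS 0

static void run_vcpu(int vcpu_fd, struct kvm_run *run)
{
	for (;;) {
		if (ioctl(vcpu_fd, KVM_RUN, 0) < 0)
			break;

		if (run->exit_reason == KVM_EXIT_PAPR_HCALL) {
			printf("hcall %llu\n",
			       (unsigned long long)run->papr_hcall.nr);
			/* ... dispatch on run->papr_hcall.args[0..8] ... */
			run->papr_hcall.ret = H_SUCCESS;
			continue;
		}
		break;	/* hand other exit reasons back to the caller */
	}
}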
Example #29
/**
 * kvmppc_handle_exit
 *
 * Return value is in the form (errcode<<2 | RESUME_FLAG_HOST | RESUME_FLAG_NV)
 */
int kvmppc_handle_exit(struct kvm_run *run, struct kvm_vcpu *vcpu,
                       unsigned int exit_nr)
{
	enum emulation_result er;
	int r = RESUME_HOST;

	/* update before a new last_exit_type is rewritten */
	kvmppc_update_timing_stats(vcpu);

	local_irq_enable();

	run->exit_reason = KVM_EXIT_UNKNOWN;
	run->ready_for_interrupt_injection = 1;

	switch (exit_nr) {
	case BOOKE_INTERRUPT_MACHINE_CHECK:
		printk("MACHINE CHECK: %lx\n", mfspr(SPRN_MCSR));
		kvmppc_dump_vcpu(vcpu);
		r = RESUME_HOST;
		break;

	case BOOKE_INTERRUPT_EXTERNAL:
		kvmppc_account_exit(vcpu, EXT_INTR_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DECREMENTER:
		/* Since we switched IVPR back to the host's value, the host
		 * handled this interrupt the moment we enabled interrupts.
		 * Now we just offer it a chance to reschedule the guest. */
		kvmppc_account_exit(vcpu, DEC_EXITS);
		if (need_resched())
			cond_resched();
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_PROGRAM:
		if (vcpu->arch.shared->msr & MSR_PR) {
			/* Program traps generated by user-level software must be handled
			 * by the guest kernel. */
			kvmppc_core_queue_program(vcpu, vcpu->arch.fault_esr);
			r = RESUME_GUEST;
			kvmppc_account_exit(vcpu, USR_PR_INST);
			break;
		}

		er = kvmppc_emulate_instruction(run, vcpu);
		switch (er) {
		case EMULATE_DONE:
			/* don't overwrite subtypes, just account kvm_stats */
			kvmppc_account_exit_stat(vcpu, EMULATED_INST_EXITS);
			/* Future optimization: only reload non-volatiles if
			 * they were actually modified by emulation. */
			r = RESUME_GUEST_NV;
			break;
		case EMULATE_DO_DCR:
			run->exit_reason = KVM_EXIT_DCR;
			r = RESUME_HOST;
			break;
		case EMULATE_FAIL:
			/* XXX Deliver Program interrupt to guest. */
			printk(KERN_CRIT "%s: emulation at %lx failed (%08x)\n",
			       __func__, vcpu->arch.pc, vcpu->arch.last_inst);
			/* For debugging, encode the failing instruction and
			 * report it to userspace. */
			run->hw.hardware_exit_reason = ~0ULL << 32;
			run->hw.hardware_exit_reason |= vcpu->arch.last_inst;
			r = RESUME_HOST;
			break;
		default:
			BUG();
		}
		break;

	case BOOKE_INTERRUPT_FP_UNAVAIL:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_FP_UNAVAIL);
		kvmppc_account_exit(vcpu, FP_UNAVAIL);
		r = RESUME_GUEST;
		break;

#ifdef CONFIG_SPE
	case BOOKE_INTERRUPT_SPE_UNAVAIL: {
		if (vcpu->arch.shared->msr & MSR_SPE)
			kvmppc_vcpu_enable_spe(vcpu);
		else
			kvmppc_booke_queue_irqprio(vcpu,
						   BOOKE_IRQPRIO_SPE_UNAVAIL);
		r = RESUME_GUEST;
		break;
	}

	case BOOKE_INTERRUPT_SPE_FP_DATA:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_DATA);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SPE_FP_ROUND);
		r = RESUME_GUEST;
		break;
#else
	case BOOKE_INTERRUPT_SPE_UNAVAIL:
		/*
		 * Guest wants SPE, but host kernel doesn't support it.  Send
		 * an "unimplemented operation" program check to the guest.
		 */
		kvmppc_core_queue_program(vcpu, ESR_PUO | ESR_SPV);
		r = RESUME_GUEST;
		break;

	/*
	 * These really should never happen without CONFIG_SPE,
	 * as we should never enable the real MSR[SPE] in the guest.
	 */
	case BOOKE_INTERRUPT_SPE_FP_DATA:
	case BOOKE_INTERRUPT_SPE_FP_ROUND:
		printk(KERN_CRIT "%s: unexpected SPE interrupt %u at %08lx\n",
		       __func__, exit_nr, vcpu->arch.pc);
		run->hw.hardware_exit_reason = exit_nr;
		r = RESUME_HOST;
		break;
#endif

	case BOOKE_INTERRUPT_DATA_STORAGE:
		kvmppc_core_queue_data_storage(vcpu, vcpu->arch.fault_dear,
		                               vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, DSI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_INST_STORAGE:
		kvmppc_core_queue_inst_storage(vcpu, vcpu->arch.fault_esr);
		kvmppc_account_exit(vcpu, ISI_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_SYSCALL:
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (((u32)kvmppc_get_gpr(vcpu, 0)) == KVM_SC_MAGIC_R0)) {
			/* KVM PV hypercalls */
			kvmppc_set_gpr(vcpu, 3, kvmppc_kvm_pv(vcpu));
			r = RESUME_GUEST;
		} else {
			/* Guest syscalls */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_SYSCALL);
		}
		kvmppc_account_exit(vcpu, SYSCALL_EXITS);
		r = RESUME_GUEST;
		break;

	case BOOKE_INTERRUPT_DTLB_MISS: {
		unsigned long eaddr = vcpu->arch.fault_dear;
		int gtlb_index;
		gpa_t gpaddr;
		gfn_t gfn;

#ifdef CONFIG_KVM_E500
		if (!(vcpu->arch.shared->msr & MSR_PR) &&
		    (eaddr & PAGE_MASK) == vcpu->arch.magic_page_ea) {
			kvmppc_map_magic(vcpu);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;

			break;
		}
#endif

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_dtlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_core_queue_dtlb_miss(vcpu,
			                            vcpu->arch.fault_dear,
			                            vcpu->arch.fault_esr);
			kvmppc_mmu_dtlb_miss(vcpu);
			kvmppc_account_exit(vcpu, DTLB_REAL_MISS_EXITS);
			r = RESUME_GUEST;
			break;
		}

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't, and it is RAM. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
			kvmppc_account_exit(vcpu, DTLB_VIRT_MISS_EXITS);
			r = RESUME_GUEST;
		} else {
			/* Guest has mapped and accessed a page which is not
			 * actually RAM. */
			vcpu->arch.paddr_accessed = gpaddr;
			r = kvmppc_emulate_mmio(run, vcpu);
			kvmppc_account_exit(vcpu, MMIO_EXITS);
		}

		break;
	}

	case BOOKE_INTERRUPT_ITLB_MISS: {
		unsigned long eaddr = vcpu->arch.pc;
		gpa_t gpaddr;
		gfn_t gfn;
		int gtlb_index;

		r = RESUME_GUEST;

		/* Check the guest TLB. */
		gtlb_index = kvmppc_mmu_itlb_index(vcpu, eaddr);
		if (gtlb_index < 0) {
			/* The guest didn't have a mapping for it. */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_ITLB_MISS);
			kvmppc_mmu_itlb_miss(vcpu);
			kvmppc_account_exit(vcpu, ITLB_REAL_MISS_EXITS);
			break;
		}

		kvmppc_account_exit(vcpu, ITLB_VIRT_MISS_EXITS);

		gpaddr = kvmppc_mmu_xlate(vcpu, gtlb_index, eaddr);
		gfn = gpaddr >> PAGE_SHIFT;

		if (kvm_is_visible_gfn(vcpu->kvm, gfn)) {
			/* The guest TLB had a mapping, but the shadow TLB
			 * didn't. This could be because:
			 * a) the entry is mapping the host kernel, or
			 * b) the guest used a large mapping which we're faking
			 * Either way, we need to satisfy the fault without
			 * invoking the guest. */
			kvmppc_mmu_map(vcpu, eaddr, gpaddr, gtlb_index);
		} else {
			/* Guest mapped and leaped at non-RAM! */
			kvmppc_booke_queue_irqprio(vcpu, BOOKE_IRQPRIO_MACHINE_CHECK);
		}

		break;
	}

	case BOOKE_INTERRUPT_DEBUG: {
		u32 dbsr;

		vcpu->arch.pc = mfspr(SPRN_CSRR0);

		/* clear IAC events in DBSR register */
		dbsr = mfspr(SPRN_DBSR);
		dbsr &= DBSR_IAC1 | DBSR_IAC2 | DBSR_IAC3 | DBSR_IAC4;
		mtspr(SPRN_DBSR, dbsr);

		run->exit_reason = KVM_EXIT_DEBUG;
		kvmppc_account_exit(vcpu, DEBUG_EXITS);
		r = RESUME_HOST;
		break;
	}

	default:
		printk(KERN_EMERG "exit_nr %d\n", exit_nr);
		BUG();
	}

	local_irq_disable();

	kvmppc_core_prepare_to_enter(vcpu);

	if (!(r & RESUME_HOST)) {
		/* To avoid clobbering exit_reason, only check for signals if
		 * we aren't already exiting to userspace for some other
		 * reason. */
		if (signal_pending(current)) {
			run->exit_reason = KVM_EXIT_INTR;
			r = (-EINTR << 2) | RESUME_HOST | (r & RESUME_FLAG_NV);
			kvmppc_account_exit(vcpu, SIGNAL_EXITS);
		}
	}

	return r;
}
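
The return convention described in the comment above packs everything into one int: bit 0 is RESUME_FLAG_NV (reload guest non-volatile registers), bit 1 is RESUME_FLAG_HOST (return to host userspace), and any error code is shifted left by two, as in the -EINTR case. A sketch of encoding and decoding under those assumed flag values:

#include <errno.h>
#include <stdio.h>

#define RESUME_FLAG_NV		(1 << 0)	/* reload guest non-volatiles */
#define RESUME_FLAG_HOST	(1 << 1)	/* exit to host userspace */

int main(void)
{
	/* mirrors the kernel's (-EINTR << 2); relies on arithmetic shifts */
	int r = (-EINTR << 2) | RESUME_FLAG_HOST | RESUME_FLAG_NV;

	printf("errcode=%d host=%d nv=%d\n",
	       r >> 2, !!(r & RESUME_FLAG_HOST), !!(r & RESUME_FLAG_NV));
	return 0;
}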