Example 1
static int handle_dabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* This is either an error in the world-switch code or an external abort */
	kvm_err("Data Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
	return -EFAULT;
}
Example 2
static int handle_pabt_hyp(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	/* The hypervisor should never cause aborts */
	kvm_err("Prefetch Abort taken from Hyp mode at %#08lx (HSR: %#08x)\n",
		kvm_vcpu_get_hfar(vcpu), kvm_vcpu_get_hsr(vcpu));
	return -EFAULT;
}
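
Both hypervisor-abort handlers above are only reached through KVM's exit dispatch, which picks a handler by the exception class (EC) field of the HSR. The standalone program below is a sketch of that mechanism under assumptions: the table layout, the handler name, and the 0x25 value (the ARMv7 EC for a data abort taken from Hyp mode) are illustrative, not a copy of the kernel's actual exit-handler table.

#include <stdio.h>

/*
 * Toy model of exit dispatch: the exception class decoded from the HSR
 * indexes a table of handlers. Names and the 0x25 value are assumptions.
 */
typedef int (*exit_handler_fn)(void);

static int model_handle_dabt_hyp(void)
{
	fprintf(stderr, "Data Abort taken from Hyp mode\n");
	return -14;	/* stands in for -EFAULT */
}

static exit_handler_fn exit_handlers[64] = {
	[0x25] = model_handle_dabt_hyp,
};

int main(void)
{
	unsigned int hsr_ec = 0x25;	/* pretend HSR.EC reports a Hyp data abort */
	int ret = exit_handlers[hsr_ec] ? exit_handlers[hsr_ec]() : 0;

	printf("handler returned %d\n", ret);
	return 0;
}

The kernel's real handlers additionally take the vcpu and run pointers, as in the two functions above.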
Example 3
static int decode_hsr(struct kvm_vcpu *vcpu, phys_addr_t fault_ipa,
		      struct kvm_exit_mmio *mmio)
{
	unsigned long rt;
	int len;	/* signed: kvm_vcpu_dabt_get_as() can return -EFAULT */
	bool is_write, sign_extend;

	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	len = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(len < 0))
		return len;

	is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	if (kvm_vcpu_reg_is_pc(vcpu, rt)) {
		/* IO memory trying to read/write pc */
		kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	mmio->is_write = is_write;
	mmio->phys_addr = fault_ipa;
	mmio->len = len;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
}
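
Note that decode_hsr() only records sign_extend and rt; the extension itself is applied later, when the emulated read completes and the result is written back into the guest register. The standalone program below models that step (an illustration of the xor/subtract sign-extension trick, modeled on what the MMIO-return path does, not the kernel's code):

#include <stdint.h>
#include <stdio.h>

/*
 * Illustration (not kernel code): apply the sign extension that
 * mmio_decode.sign_extend requests once an emulated read of 'len'
 * bytes has produced 'data'.
 */
static unsigned long mmio_sign_extend(unsigned long data, unsigned int len)
{
	unsigned long mask = 1UL << (len * 8 - 1);

	return (data ^ mask) - mask;	/* xor/subtract sign-extension trick */
}

int main(void)
{
	/* An LDRSB reading 0x80 from an emulated device register must
	 * make the guest observe -128, not 128. */
	printf("%ld\n", (long)mmio_sign_extend(0x80, 1));
	return 0;
}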
Example 4
static int decode_hsr(struct kvm_vcpu *vcpu, bool *is_write, int *len)
{
	unsigned long rt;
	int access_size;
	bool sign_extend;

	if (kvm_vcpu_dabt_isextabt(vcpu)) {
		/* cache operation on I/O addr, tell guest unsupported */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	if (kvm_vcpu_dabt_iss1tw(vcpu)) {
		/* page table accesses IO mem: tell guest to fix its TTBR */
		kvm_inject_dabt(vcpu, kvm_vcpu_get_hfar(vcpu));
		return 1;
	}

	access_size = kvm_vcpu_dabt_get_as(vcpu);
	if (unlikely(access_size < 0))
		return access_size;

	*is_write = kvm_vcpu_dabt_iswrite(vcpu);
	sign_extend = kvm_vcpu_dabt_issext(vcpu);
	rt = kvm_vcpu_dabt_get_rd(vcpu);

	*len = access_size;
	vcpu->arch.mmio_decode.sign_extend = sign_extend;
	vcpu->arch.mmio_decode.rt = rt;

	/*
	 * The MMIO instruction is emulated and should not be re-executed
	 * in the guest.
	 */
	kvm_skip_instr(vcpu, kvm_vcpu_trap_il_is32bit(vcpu));
	return 0;
}
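
This later variant deliberately keeps access_size a plain int: kvm_vcpu_dabt_get_as() reports failure with a negative value (-EFAULT), and a < 0 test on an unsigned variable is dead code. A standalone demonstration:

#include <stdio.h>

/*
 * Standalone demonstration (not kernel code): an error return such as
 * -EFAULT stored in an unsigned variable can never satisfy a < 0 test,
 * so the check silently becomes dead code.
 */
int main(void)
{
	unsigned long ulen = (unsigned long)-14;	/* -EFAULT, wrapped */
	long slen = -14;

	if (ulen < 0)	/* always false; compilers warn with -Wextra */
		printf("unsigned: error caught\n");
	else
		printf("unsigned: error missed, len=%lu\n", ulen);

	if (slen < 0)
		printf("signed: error caught (%ld)\n", slen);
	return 0;
}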
Example 5
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_get_reg(vcpu, rd);
		if (__is_be(vcpu)) {
			/* guest pre-swabbed data, undo this for writel() */
			data = swab32(data);
		}
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		if (__is_be(vcpu)) {
			/* guest expects swabbed data */
			data = swab32(data);
		}
		vcpu_set_reg(vcpu, rd, data);
	}

	return 1;
}
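
Two details above are easy to miss: the fault IPA delivered by hardware is page-aligned, so the low 12 bits must be taken from the faulting VA (GENMASK(11, 0) is 0xfff), and a big-endian guest's data must be byte-swapped around the little-endian writel()/readl() accessors. The standalone program below models both (illustrative addresses, not kernel code):

#include <stdint.h>
#include <stdio.h>

/*
 * Standalone model of two steps from the proxy above: completing the
 * page-aligned fault IPA with the low 12 bits of the faulting VA, and
 * swabbing a big-endian guest's data around little-endian accessors.
 */
static uint32_t model_swab32(uint32_t x)
{
	return (x >> 24) | ((x >> 8) & 0xff00) |
	       ((x << 8) & 0xff0000) | (x << 24);
}

int main(void)
{
	uint64_t fault_ipa = 0x3f000000;	/* page-aligned, as reported by hardware */
	uint64_t hfar = 0x0000007f9200201cULL;	/* full faulting VA */

	fault_ipa |= hfar & 0xfff;	/* GENMASK(11, 0) == 0xfff */
	printf("full IPA: %#llx\n", (unsigned long long)fault_ipa);
	printf("swab32(0x11223344) = %#x\n", model_swab32(0x11223344));
	return 0;
}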
Example 6
/**
 * kvm_handle_guest_abort - handles all 2nd stage aborts
 * @vcpu:	the VCPU pointer
 * @run:	the kvm_run structure
 *
 * Any abort that gets to the host is almost guaranteed to be caused by a
 * missing second stage translation table entry, which can mean either that
 * the guest simply needs more memory (and we must allocate an appropriate
 * page) or that the guest tried to access I/O memory, which is emulated by
 * user space. The distinction is based on the IPA causing the fault and on
 * whether this memory region has been registered as standard RAM by user
 * space.
 */
int kvm_handle_guest_abort(struct kvm_vcpu *vcpu, struct kvm_run *run)
{
	unsigned long fault_status;
	phys_addr_t fault_ipa;
	struct kvm_memory_slot *memslot;
	bool is_iabt;
	gfn_t gfn;
	int ret, idx;

	is_iabt = kvm_vcpu_trap_is_iabt(vcpu);
	fault_ipa = kvm_vcpu_get_fault_ipa(vcpu);

	trace_kvm_guest_fault(*vcpu_pc(vcpu), kvm_vcpu_get_hsr(vcpu),
			      kvm_vcpu_get_hfar(vcpu), fault_ipa);

	/* Check that the stage-2 fault is a translation or permission fault */
	fault_status = kvm_vcpu_trap_get_fault(vcpu);
	if (fault_status != FSC_FAULT && fault_status != FSC_PERM) {
		kvm_err("Unsupported fault status: EC=%#x DFCS=%#lx\n",
			kvm_vcpu_trap_get_class(vcpu), fault_status);
		return -EFAULT;
	}

	idx = srcu_read_lock(&vcpu->kvm->srcu);

	gfn = fault_ipa >> PAGE_SHIFT;
	if (!kvm_is_visible_gfn(vcpu->kvm, gfn)) {
		if (is_iabt) {
			/* Prefetch Abort on I/O address */
			kvm_inject_pabt(vcpu, kvm_vcpu_get_hfar(vcpu));
			ret = 1;
			goto out_unlock;
		}

		if (fault_status != FSC_FAULT) {
			kvm_err("Unsupported fault status on io memory: %#lx\n",
				fault_status);
			ret = -EFAULT;
			goto out_unlock;
		}

		/*
		 * The IPA is reported as [MAX:12], so we need to
		 * complement it with the bottom 12 bits from the
		 * faulting VA. This is always 12 bits, irrespective
		 * of the page size.
		 */
		fault_ipa |= kvm_vcpu_get_hfar(vcpu) & ((1 << 12) - 1);
		ret = io_mem_abort(vcpu, run, fault_ipa);
		goto out_unlock;
	}

	memslot = gfn_to_memslot(vcpu->kvm, gfn);

	ret = user_mem_abort(vcpu, fault_ipa, gfn, memslot, fault_status);
	if (ret == 0)
		ret = 1;
out_unlock:
	srcu_read_unlock(&vcpu->kvm->srcu, idx);
	return ret;
}
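
The whole RAM-versus-MMIO decision above reduces to whether the faulting gfn (fault_ipa >> PAGE_SHIFT) falls inside a memslot registered by user space. The toy program below models that dispatch (hypothetical slot layout and a simplified stand-in for kvm_is_visible_gfn(), not kernel code):

#include <stdbool.h>
#include <stdint.h>
#include <stdio.h>

#define PAGE_SHIFT 12

/*
 * Toy model of the RAM-versus-MMIO dispatch in kvm_handle_guest_abort():
 * a gfn inside a registered slot is a normal stage-2 fault, anything
 * else is treated as an emulated MMIO access.
 */
struct memslot {
	uint64_t base_gfn;
	uint64_t npages;
};

static bool gfn_visible(const struct memslot *slots, int n, uint64_t gfn)
{
	for (int i = 0; i < n; i++)
		if (gfn >= slots[i].base_gfn &&
		    gfn < slots[i].base_gfn + slots[i].npages)
			return true;
	return false;
}

int main(void)
{
	struct memslot slots[] = {
		{ 0x40000, 0x10000 },	/* 256MB of guest RAM at 1GB */
	};
	uint64_t fault_ipa = 0x3f00201c;	/* a device address below RAM */
	uint64_t gfn = fault_ipa >> PAGE_SHIFT;

	printf("gfn %#llx -> %s\n", (unsigned long long)gfn,
	       gfn_visible(slots, 1, gfn) ? "RAM fault" : "MMIO abort");
	return 0;
}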