Example #1
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* Switch to requested VMID */
	write_sysreg(kvm->arch.vttbr, VTTBR);
	isb();

	write_sysreg(0, TLBIALL);
	dsb(nsh);
	isb();

	write_sysreg(0, VTTBR);
}
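For context, the usual caller of this local flush sits in kvm_arch_vcpu_load(): when a different vCPU of the same VM last ran on this physical CPU, its stale entries for the shared VMID must go before the new vCPU runs. A sketch of that call site (based on the virt/kvm/arm/arm.c of the same era, reproduced from memory, so treat as approximate):

	/* Inside kvm_arch_vcpu_load(vcpu, cpu): */
	last_ran = this_cpu_ptr(vcpu->kvm->arch.last_vcpu_ran);
	if (*last_ran != vcpu->vcpu_id) {
		/* Another vCPU used this VMID here; zap its local TLB entries */
		kvm_call_hyp(__kvm_tlb_flush_local_vmid, vcpu);
		*last_ran = vcpu->vcpu_id;
	}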
Example #2
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);
	unsigned long flags;

	/* Switch to requested VMID */
	__tlb_switch_to_guest()(kvm, &flags);

	__tlbi(vmalle1);
	dsb(nsh);
	isb();

	__tlb_switch_to_host()(kvm, flags);
}
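The double parentheses in __tlb_switch_to_guest()(kvm, &flags) are not a typo: __tlb_switch_to_guest() is a tiny selector generated by hyp_alternate_select() that returns a function pointer (the nVHE or VHE worker), patched at boot via the alternatives framework; the second pair of parentheses then calls the selected worker. The macro and its use, from arch/arm64/include/asm/kvm_hyp.h and arch/arm64/kvm/hyp/tlb.c of this era (reproduced from memory, so approximate):

#define hyp_alternate_select(fname, orig, alt, cond)			\
typeof(orig) * __hyp_text fname(void)					\
{									\
	typeof(alt) *val = orig;					\
	asm volatile(ALTERNATIVE("nop		\n",			\
				 "mov	%0, %1	\n",			\
				 cond)					\
		     : "+r" (val) : "r" (alt));				\
	return val;							\
}

static hyp_alternate_select(__tlb_switch_to_guest,
			    __tlb_switch_to_guest_nvhe,
			    __tlb_switch_to_guest_vhe,
			    ARM64_HAS_VIRT_HOST_EXTN);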
Example #3
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	int i, nr_lr;

	if (!base)
		return;

	writel_relaxed(cpu_if->vgic_hcr, base + GICH_HCR);
	writel_relaxed(cpu_if->vgic_vmcr, base + GICH_VMCR);
	writel_relaxed(cpu_if->vgic_apr, base + GICH_APR);

	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	for (i = 0; i < nr_lr; i++)
		writel_relaxed(cpu_if->vgic_lr[i], base + GICH_LR0 + (i * 4));
}
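The loop offset GICH_LR0 + (i * 4) reflects that the GICv2 hypervisor control interface exposes the list registers as consecutive 32-bit MMIO registers. This function is not called directly by the world switch; a dispatcher picks the v2 or v3 variant. A sketch of that dispatcher (approximate, modeled on arch/arm64/kvm/hyp/switch.c of a similar era):

static void __hyp_text __vgic_restore_state(struct kvm_vcpu *vcpu)
{
	if (static_branch_unlikely(&kvm_vgic_global_state.gicv3_cpuif))
		__vgic_v3_restore_state(vcpu);
	else
		__vgic_v2_restore_state(vcpu);
}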
Example #4
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	unsigned long flags;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest()(kvm, &flags);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi(ipas2e1is, ipa);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (!has_vhe() && icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host()(kvm, flags);
}
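The ipa >>= 12 matches the TLBI IPAS2E1IS operand format: the low bits of the source register carry the IPA as a 4KiB page number, not a byte address. The __tlbi() helper is a thin wrapper that emits the instruction; a minimal sketch of its two shapes (the real macro in arch/arm64/include/asm/tlbflush.h is variadic and also carries errata workarounds, so treat this as an illustration only):

/* No-operand form, e.g. __tlbi(vmalle1is): */
#define __tlbi_0(op)		asm volatile("tlbi " #op : : )
/* One-operand form, e.g. __tlbi(ipas2e1is, ipa): */
#define __tlbi_1(op, arg)	asm volatile("tlbi " #op ", %0" : : "r" (arg))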
Example #5
/* vcpu is already in the HYP VA space */
void __hyp_text __vgic_v2_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_v2_cpu_if *cpu_if = &vcpu->arch.vgic_cpu.vgic_v2;
	struct vgic_dist *vgic = &kvm->arch.vgic;
	void __iomem *base = kern_hyp_va(vgic->vctrl_base);
	u32 eisr0, eisr1, elrsr0, elrsr1;
	int i, nr_lr;

	if (!base)
		return;

	nr_lr = vcpu->arch.vgic_cpu.nr_lr;
	cpu_if->vgic_vmcr = readl_relaxed(base + GICH_VMCR);
	cpu_if->vgic_misr = readl_relaxed(base + GICH_MISR);
	eisr0  = readl_relaxed(base + GICH_EISR0);
	elrsr0 = readl_relaxed(base + GICH_ELRSR0);
	if (unlikely(nr_lr > 32)) {
		eisr1  = readl_relaxed(base + GICH_EISR1);
		elrsr1 = readl_relaxed(base + GICH_ELRSR1);
	} else {
		eisr1 = elrsr1 = 0;
	}
#ifdef CONFIG_CPU_BIG_ENDIAN
	cpu_if->vgic_eisr  = ((u64)eisr0 << 32) | eisr1;
	cpu_if->vgic_elrsr = ((u64)elrsr0 << 32) | elrsr1;
#else
	cpu_if->vgic_eisr  = ((u64)eisr1 << 32) | eisr0;
	cpu_if->vgic_elrsr = ((u64)elrsr1 << 32) | elrsr0;
#endif
	cpu_if->vgic_apr    = readl_relaxed(base + GICH_APR);

	writel_relaxed(0, base + GICH_HCR);

	for (i = 0; i < nr_lr; i++)
		cpu_if->vgic_lr[i] = readl_relaxed(base + GICH_LR0 + (i * 4));
}
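The #ifdef block is an endianness fix-up: it guarantees that, viewed as two consecutive u32 words in memory, word 0 of vgic_eisr/vgic_elrsr is always the ...0 register and word 1 the ...1 register, whatever the CPU byte order. A short illustration of the resulting invariant (hypothetical snippet, not from the kernel tree):

	/* Holds on both little- and big-endian after the packing above: */
	u32 *elrsr = (u32 *)&cpu_if->vgic_elrsr;
	/* elrsr[0] is GICH_ELRSR0 (status of LRs 0-31),  */
	/* elrsr[1] is GICH_ELRSR1 (status of LRs 32-63). */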
Example #6
/*
 * Called on entry to KVM_RUN unless this vcpu previously ran at least
 * once and the most recent prior KVM_RUN for this vcpu was called from
 * the same task as current (highly likely).
 *
 * This is guaranteed to execute before kvm_arch_vcpu_load_fp(vcpu),
 * such that on entering hyp the relevant parts of current are already
 * mapped.
 */
int kvm_arch_vcpu_run_map_fp(struct kvm_vcpu *vcpu)
{
	int ret;

	struct thread_info *ti = &current->thread_info;
	struct user_fpsimd_state *fpsimd = &current->thread.uw.fpsimd_state;

	/*
	 * Make sure the host task thread flags and fpsimd state are
	 * visible to hyp:
	 */
	ret = create_hyp_mappings(ti, ti + 1, PAGE_HYP);
	if (ret)
		goto error;

	ret = create_hyp_mappings(fpsimd, fpsimd + 1, PAGE_HYP);
	if (ret)
		goto error;

	vcpu->arch.host_thread_info = kern_hyp_va(ti);
	vcpu->arch.host_fpsimd_state = kern_hyp_va(fpsimd);
error:
	return ret;
}
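create_hyp_mappings() takes a [from, to) virtual address range, so ti + 1 and fpsimd + 1 are one-past-the-end pointers that cover each object in full. Note also the control flow: on success, execution falls through to the error label with ret == 0, so the single return serves both paths. The prototype, as declared in asm/kvm_mmu.h in this era (from memory, so approximate):

int create_hyp_mappings(void *from, void *to, pgprot_t prot);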
Example #7
/**
 * Flush per-VMID TLBs
 *
 * __kvm_tlb_flush_vmid(struct kvm *kvm);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	write_sysreg(kvm->arch.vttbr, VTTBR);
	isb();

	write_sysreg(0, TLBIALLIS);
	dsb(ish);
	isb();

	write_sysreg(0, VTTBR);
}
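This flush is reached from the kernel proper through the HYP call trampoline. Sketches of the wrappers (approximate, modeled on the arch/arm/kvm/mmu.c of the same era):

void kvm_flush_remote_tlbs(struct kvm *kvm)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}

static void kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	kvm_call_hyp(__kvm_tlb_flush_vmid_ipa, kvm, ipa);
}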
Example #8
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
	unsigned long flags;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest()(kvm, &flags);

	__tlbi(vmalls12e1is);
	dsb(ish);
	isb();

	__tlb_switch_to_host()(kvm, flags);
}
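Unlike the strictly local vmalle1 flush in Example #2, vmalls12e1is invalidates both Stage-1 and Stage-2 entries for the current VMID across the whole inner-shareable domain, which is why the barrier here is dsb(ish) rather than dsb(nsh). The host-side counterpart of the guest switch simply clears VTTBR_EL2 again; a sketch of the nVHE variant (approximate):

static void __hyp_text __tlb_switch_to_host_nvhe(struct kvm *kvm,
						 unsigned long flags)
{
	write_sysreg(0, vttbr_el2);
}

Example #9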
/*
 * __vgic_v2_perform_cpuif_access -- perform a GICV access on behalf of the
 *				     guest.
 *
 * @vcpu: the offending vcpu
 *
 * Returns:
 *  1: GICV access successfully performed
 *  0: Not a GICV access
 * -1: Illegal GICV access
 */
int __hyp_text __vgic_v2_perform_cpuif_access(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct vgic_dist *vgic = &kvm->arch.vgic;
	phys_addr_t fault_ipa;
	void __iomem *addr;
	int rd;

	/* Build the full address */
	fault_ipa  = kvm_vcpu_get_fault_ipa(vcpu);
	fault_ipa |= kvm_vcpu_get_hfar(vcpu) & GENMASK(11, 0);

	/* If not for GICV, move on */
	if (fault_ipa <  vgic->vgic_cpu_base ||
	    fault_ipa >= (vgic->vgic_cpu_base + KVM_VGIC_V2_CPU_SIZE))
		return 0;

	/* Reject anything but a 32bit access */
	if (kvm_vcpu_dabt_get_as(vcpu) != sizeof(u32))
		return -1;

	/* Not aligned? Don't bother */
	if (fault_ipa & 3)
		return -1;

	rd = kvm_vcpu_dabt_get_rd(vcpu);
	addr  = hyp_symbol_addr(kvm_vgic_global_state)->vcpu_hyp_va;
	addr += fault_ipa - vgic->vgic_cpu_base;

	if (kvm_vcpu_dabt_iswrite(vcpu)) {
		u32 data = vcpu_get_reg(vcpu, rd);
		if (__is_be(vcpu)) {
			/* guest pre-swabbed data, undo this for writel() */
			data = swab32(data);
		}
		writel_relaxed(data, addr);
	} else {
		u32 data = readl_relaxed(addr);
		if (__is_be(vcpu)) {
			/* guest expects swabbed data */
			data = swab32(data);
		}
		vcpu_set_reg(vcpu, rd, data);
	}

	return 1;
}
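__is_be() decides whether the guest currently sees big-endian data: readl_relaxed()/writel_relaxed() always operate little-endian, so a big-endian guest's data has to be byte-swapped in both directions. The helper, from the same source file (reproduced from memory, so approximate):

static bool __hyp_text __is_be(struct kvm_vcpu *vcpu)
{
	if (vcpu_mode_is_32bit(vcpu))
		return !!(read_sysreg_el2(spsr) & COMPAT_PSR_E_BIT);

	return !!(read_sysreg(SCTLR_EL1) & SCTLR_ELx_EE);
}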
Example #10
void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	/*
	 * Disallow physical timer access for the guest
	 * Physical counter access is allowed
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~CNTHCTL_EL1PCEN;
	val |= CNTHCTL_EL1PCTEN;
	write_sysreg(val, cnthctl_el2);

	if (kvm->arch.timer.enabled) {
		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
		write_sysreg(timer->cntv_cval, cntv_cval_el0);
		isb();
		write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
	}
}
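The two CNTHCTL_EL2 bits involved are EL1/EL0 trap enables: clearing EL1PCEN traps guest accesses to the physical timer registers, while setting EL1PCTEN keeps the physical counter readable from the guest. The constants, as defined in include/clocksource/arm_arch_timer.h of this era (from memory, so approximate):

#define CNTHCTL_EL1PCTEN	(1 << 0)	/* EL1 physical counter access */
#define CNTHCTL_EL1PCEN		(1 << 1)	/* EL1 physical timer access */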
Example #11
/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	if (kvm->arch.timer.enabled) {
		timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
		timer->cntv_cval = read_sysreg(cntv_cval_el0);
	}

	/* Disable the virtual timer */
	write_sysreg(0, cntv_ctl_el0);

	/* Allow physical timer/counter access for the host */
	val = read_sysreg(cnthctl_el2);
	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	write_sysreg(val, cnthctl_el2);

	/* Clear cntvoff for the host */
	write_sysreg(0, cntvoff_el2);
}
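Save and restore bracket the guest run: the restore path of Example #10 programs CNTVOFF_EL2 and re-arms the virtual timer on the way in; this save path reads the state back, disables the virtual timer, and reopens physical timer/counter access for the host on the way out. Roughly (simplified from the __kvm_vcpu_run world switch of this era; names approximate, not a verbatim copy):

	__timer_restore_state(vcpu);		/* entering the guest */
	exit_code = __guest_enter(vcpu, host_ctxt);
	__timer_save_state(vcpu);		/* back in the host */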