Example #1
static void __hyp_text __sysreg_restore_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[MPIDR_EL1],		vmpidr_el2);
	write_sysreg(ctxt->sys_regs[CSSELR_EL1],	csselr_el1);
	write_sysreg_el1(ctxt->sys_regs[SCTLR_EL1],	sctlr);
	write_sysreg_el1(ctxt->sys_regs[CPACR_EL1],	cpacr);
	write_sysreg_el1(ctxt->sys_regs[TTBR0_EL1],	ttbr0);
	write_sysreg_el1(ctxt->sys_regs[TTBR1_EL1],	ttbr1);
	write_sysreg_el1(ctxt->sys_regs[TCR_EL1],	tcr);
	write_sysreg_el1(ctxt->sys_regs[ESR_EL1],	esr);
	write_sysreg_el1(ctxt->sys_regs[AFSR0_EL1],	afsr0);
	write_sysreg_el1(ctxt->sys_regs[AFSR1_EL1],	afsr1);
	write_sysreg_el1(ctxt->sys_regs[FAR_EL1],	far);
	write_sysreg_el1(ctxt->sys_regs[MAIR_EL1],	mair);
	write_sysreg_el1(ctxt->sys_regs[VBAR_EL1],	vbar);
	write_sysreg_el1(ctxt->sys_regs[CONTEXTIDR_EL1], contextidr);
	write_sysreg_el1(ctxt->sys_regs[AMAIR_EL1],	amair);
	write_sysreg_el1(ctxt->sys_regs[CNTKCTL_EL1], 	cntkctl);
	write_sysreg(ctxt->sys_regs[PAR_EL1],		par_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL1],		tpidr_el1);

	write_sysreg(ctxt->gp_regs.sp_el1,		sp_el1);
	write_sysreg_el1(ctxt->gp_regs.elr_el1,		elr);
	write_sysreg_el1(ctxt->gp_regs.spsr[KVM_SPSR_EL1], spsr);
	write_sysreg_el2(ctxt->gp_regs.regs.pc,		elr);
	write_sysreg_el2(ctxt->gp_regs.regs.pstate,	spsr);

	if (cpus_have_const_cap(ARM64_HAS_RAS_EXTN))
		write_sysreg_s(ctxt->sys_regs[DISR_EL1], SYS_VDISR_EL2);
}
Example #2
static void __hyp_text __sysreg_restore_common_state(struct kvm_cpu_context *ctxt)
{
	write_sysreg(ctxt->sys_regs[ACTLR_EL1],	  actlr_el1);
	write_sysreg(ctxt->sys_regs[TPIDR_EL0],	  tpidr_el0);
	write_sysreg(ctxt->sys_regs[TPIDRRO_EL0], tpidrro_el0);
	write_sysreg(ctxt->sys_regs[MDSCR_EL1],	  mdscr_el1);
	write_sysreg(ctxt->gp_regs.regs.sp,	  sp_el0);
}
Example #3
static inline int armv8pmu_disable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenclr_el1);
	isb();
	/* Clear the overflow flag in case an interrupt is pending. */
	write_sysreg(BIT(counter), pmovsclr_el0);
	isb();

	return idx;
}
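The two writes above lean on write-one-to-clear semantics: PMINTENCLR_EL1 and PMOVSCLR_EL0 clear exactly the bits written as 1, so BIT(counter) disables one counter's interrupt and clears one counter's overflow flag without a read-modify-write. A standalone userspace sketch of that behaviour (BIT() redefined locally; the register is modelled as a plain variable):

#include <stdint.h>
#include <stdio.h>

#define BIT(n) (1u << (n))

/* Model of a write-one-to-clear register. */
static uint32_t pmovsclr = 0x2d;	/* pretend counters 0, 2, 3 and 5 overflowed */

static void w1c_write(uint32_t *reg, uint32_t val)
{
	*reg &= ~val;	/* only the bits written as 1 are cleared */
}

int main(void)
{
	w1c_write(&pmovsclr, BIT(2));
	printf("after clearing counter 2: 0x%02x\n", pmovsclr);	/* 0x29 */
	return 0;
}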
Example #4
static void tls_thread_switch(struct task_struct *next)
{
	tls_preserve_current_state();

	if (is_compat_thread(task_thread_info(next)))
		write_sysreg(next->thread.uw.tp_value, tpidrro_el0);
	else if (!arm64_kernel_unmapped_at_el0())
		write_sysreg(0, tpidrro_el0);

	write_sysreg(*task_user_tls(next), tpidr_el0);
}
Example #5
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
						unsigned long flags)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();
	local_irq_restore(flags);
}
Example #6
static void tls_thread_switch(struct task_struct *next)
{
	unsigned long tpidr, tpidrro;

	tls_preserve_current_state();

	tpidr = *task_user_tls(next);
	tpidrro = is_compat_thread(task_thread_info(next)) ?
		  next->thread.tp_value : 0;

	write_sysreg(tpidr, tpidr_el0);
	write_sysreg(tpidrro, tpidrro_el0);
}
Example #7
void __hyp_text __kvm_tlb_flush_local_vmid(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(kern_hyp_va(vcpu)->kvm);

	/* Switch to requested VMID */
	write_sysreg(kvm->arch.vttbr, VTTBR);
	isb();

	write_sysreg(0, TLBIALL);
	dsb(nsh);
	isb();

	write_sysreg(0, VTTBR);
}
Example #8
/**
 * Flush per-VMID TLBs
 *
 * __kvm_tlb_flush_vmid(struct kvm *kvm);
 *
 * We rely on the hardware to broadcast the TLB invalidation to all CPUs
 * inside the inner-shareable domain (which is the case for all v7
 * implementations).  If we come across a non-IS SMP implementation, we'll
 * have to use an IPI based mechanism. Until then, we stick to the simple
 * hardware assisted version.
 *
 * As v7 does not support flushing per IPA, just nuke the whole TLB
 * instead, ignoring the ipa value.
 */
void __hyp_text __kvm_tlb_flush_vmid(struct kvm *kvm)
{
	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	write_sysreg(kvm->arch.vttbr, VTTBR);
	isb();

	write_sysreg(0, TLBIALLIS);
	dsb(ish);
	isb();

	write_sysreg(0, VTTBR);
}
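How a __hyp_text helper like this is reached is not shown in the listing: it runs at EL2 and is entered from the host via a hypercall rather than a direct call. A hedged sketch of a typical host-side wrapper (the wrapper name is hypothetical; kvm_call_hyp() is KVM's mechanism for invoking EL2 functions):

/* Hypothetical host-side wrapper; sketch only. */
static void kvm_tlb_flush_vmid(struct kvm *kvm)
{
	/* the kern_hyp_va() translation happens inside the EL2 function */
	kvm_call_hyp(__kvm_tlb_flush_vmid, kvm);
}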
Example #9
static inline void armv8pmu_write_evtype(int idx, u32 val)
{
	if (armv8pmu_select_counter(idx) == idx) {
		val &= ARMV8_PMU_EVTYPE_MASK;
		write_sysreg(val, pmxevtyper_el0);
	}
}
Example #10
static void tls_thread_flush(void)
{
	write_sysreg(0, tpidr_el0);

	if (is_compat_task()) {
		current->thread.tp_value = 0;

		/*
		 * We need to ensure ordering between the shadow state and the
		 * hardware state, so that we don't corrupt the hardware state
		 * with a stale shadow state during context switch.
		 */
		barrier();
		write_sysreg(0, tpidrro_el0);
	}
}
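The barrier() above is a compiler barrier, not a hardware one: it only has to stop the compiler from reordering the shadow-state store in memory against the tpidrro_el0 register write that follows. A standalone illustration of the idiom, with barrier() defined locally the way the kernel defines it for GCC:

/* Standalone illustration; not kernel code. */
#define barrier() __asm__ __volatile__("" ::: "memory")

static unsigned long shadow_tp;		/* stands in for current->thread.tp_value */
static unsigned long hw_tpidrro;	/* stands in for the tpidrro_el0 write */

static void flush_tls(void)
{
	shadow_tp = 0;
	barrier();	/* the memory clobber keeps the two stores in this order */
	hw_tpidrro = 0;
}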
Example #11
static inline int armv8pmu_select_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(counter, pmselr_el0);
	isb();

	return idx;
}
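pmselr_el0 and the pmxev* registers form a select-then-access window: the selector chooses which of the banked event counters the following pmxevtyper_el0/pmxevcntr_el0 accesses hit, and the isb() makes the new selection take effect before they execute (Example #9 relies on this). A small userspace model of the indirection:

#include <stdint.h>
#include <stdio.h>

#define NR_COUNTERS 6

static uint32_t pmselr;			/* models the selector register */
static uint32_t pmevtyper[NR_COUNTERS];	/* models the banked event-type registers */

/* A PMXEVTYPER write lands in whichever counter PMSELR selects. */
static void pmxevtyper_write(uint32_t val)
{
	pmevtyper[pmselr % NR_COUNTERS] = val;
}

int main(void)
{
	pmselr = 3;			/* armv8pmu_select_counter(3) */
	pmxevtyper_write(0x11);		/* 0x11 is the CPU_CYCLES event */
	printf("counter 3 event type: 0x%x\n", pmevtyper[3]);
	return 0;
}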
Example #12
static __always_inline void fsl_a008585_set_next_event(const int access,
		unsigned long evt, struct clock_event_device *clk)
{
	unsigned long ctrl;
	u64 cval = evt + arch_counter_get_cntvct();

	ctrl = arch_timer_reg_read(access, ARCH_TIMER_REG_CTRL, clk);
	ctrl |= ARCH_TIMER_CTRL_ENABLE;
	ctrl &= ~ARCH_TIMER_CTRL_IT_MASK;

	if (access == ARCH_TIMER_PHYS_ACCESS)
		write_sysreg(cval, cntp_cval_el0);
	else if (access == ARCH_TIMER_VIRT_ACCESS)
		write_sysreg(cval, cntv_cval_el0);

	arch_timer_reg_write(access, ARCH_TIMER_REG_CTRL, ctrl, clk);
}
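The workaround programs the absolute compare register (CNTP_CVAL/CNTV_CVAL) instead of the relative TVAL: the deadline is computed by hand as the current count plus the delta, using the erratum-safe counter read. A standalone sketch of that arithmetic:

#include <stdint.h>
#include <stdio.h>

static uint64_t cntvct;		/* models the free-running counter */
static uint64_t cntv_cval;	/* models the absolute compare register */

static void set_next_event(unsigned long evt)
{
	cntv_cval = cntvct + evt;	/* cval = evt + arch_counter_get_cntvct() */
}

static int timer_condition_met(void)
{
	return cntvct >= cntv_cval;	/* the timer asserts at the deadline */
}

int main(void)
{
	cntvct = 1000;
	set_next_event(50);
	cntvct += 49;
	printf("at %llu: fired=%d\n", (unsigned long long)cntvct, timer_condition_met());
	cntvct++;
	printf("at %llu: fired=%d\n", (unsigned long long)cntvct, timer_condition_met());
	return 0;
}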
Example #13
void __hyp_text enable_el1_phys_timer_access(void)
{
	u64 val;

	/* Allow physical timer/counter access for the host */
	val = read_sysreg(cnthctl_el2);
	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	write_sysreg(val, cnthctl_el2);
}
Example #14
static void __cpu_do_idle_irqprio(void)
{
	unsigned long pmr;
	unsigned long daif_bits;

	daif_bits = read_sysreg(daif);
	write_sysreg(daif_bits | PSR_I_BIT, daif);

	/*
	 * Unmask PMR before going idle to make sure interrupts can
	 * be raised.
	 */
	pmr = gic_read_pmr();
	gic_write_pmr(GIC_PRIO_IRQON);

	__cpu_do_idle();

	gic_write_pmr(pmr);
	write_sysreg(daif_bits, daif);
}
Example #15
static void __hyp_text __tlb_switch_to_guest_vhe(struct kvm *kvm,
						 unsigned long *flags)
{
	u64 val;

	local_irq_save(*flags);

	/*
	 * With VHE enabled, we have HCR_EL2.{E2H,TGE} = {1,1}, and
	 * most TLB operations target EL2/EL0. In order to affect the
	 * guest TLBs (EL1/EL0), we need to change one of these two
	 * bits. Changing E2H is impossible (goodbye TTBR1_EL2), so
	 * let's flip TGE before executing the TLB operation.
	 */
	write_sysreg(kvm->arch.vttbr, vttbr_el2);
	val = read_sysreg(hcr_el2);
	val &= ~HCR_TGE;
	write_sysreg(val, hcr_el2);
	isb();
}
Example #16
/*
 * Cache Size Selection Register (CSSELR) selects which Cache Size ID
 * Register (CCSIDR) is accessible by specifying the required cache
 * level and the cache type. We need to ensure that no one else changes
 * CSSELR by calling this in a non-preemptible context.
 */
u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
{
	u64 ccsidr;

	WARN_ON(preemptible());

	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);

	return ccsidr;
}
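Once CSSELR has picked a cache level and type, CCSIDR_EL1 reports that cache's geometry. A standalone decoder for the value cache_get_ccsidr() returns, assuming the pre-ARMv8.3 (non-CCIDX) layout: LineSize in bits [2:0], Associativity in [12:3], NumSets in [27:13]:

#include <stdint.h>
#include <stdio.h>

/* Decode a CCSIDR_EL1 value (non-CCIDX layout assumed). */
static void decode_ccsidr(uint64_t ccsidr)
{
	unsigned int line_bytes = 16u << (ccsidr & 0x7);	/* 2^(LineSize + 4) */
	unsigned int ways = ((ccsidr >> 3) & 0x3ff) + 1;
	unsigned int sets = ((ccsidr >> 13) & 0x7fff) + 1;

	printf("%u sets, %u ways, %u-byte lines => %u KiB\n",
	       sets, ways, line_bytes, sets * ways * line_bytes / 1024);
}

int main(void)
{
	/* Synthetic value: 128 sets, 4 ways, 64-byte lines (32 KiB). */
	decode_ccsidr((127ull << 13) | (3ull << 3) | 2ull);
	return 0;
}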
Example #17
static inline void armv8pmu_write_counter(struct perf_event *event, u32 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * Set the upper 32bits as this is a 64bit counter but we only
		 * count using the lower 32bits and we want an interrupt when
		 * it overflows.
		 */
		u64 value64 = 0xffffffff00000000ULL | value;

		write_sysreg(value64, pmccntr_el0);
	} else if (armv8pmu_select_counter(idx) == idx)
		write_sysreg(value, pmxevcntr_el0);
}
Example #18
/* vcpu is already in the HYP VA space */
void __hyp_text __timer_save_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	if (kvm->arch.timer.enabled) {
		timer->cntv_ctl = read_sysreg(cntv_ctl_el0);
		timer->cntv_cval = read_sysreg(cntv_cval_el0);
	}

	/* Disable the virtual timer */
	write_sysreg(0, cntv_ctl_el0);

	/* Allow physical timer/counter access for the host */
	val = read_sysreg(cnthctl_el2);
	val |= CNTHCTL_EL1PCTEN | CNTHCTL_EL1PCEN;
	write_sysreg(val, cnthctl_el2);

	/* Clear cntvoff for the host */
	write_sysreg(0, cntvoff_el2);
}
Example #19
void __hyp_text __timer_restore_state(struct kvm_vcpu *vcpu)
{
	struct kvm *kvm = kern_hyp_va(vcpu->kvm);
	struct arch_timer_cpu *timer = &vcpu->arch.timer_cpu;
	u64 val;

	/*
	 * Disallow physical timer access for the guest
	 * Physical counter access is allowed
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~CNTHCTL_EL1PCEN;
	val |= CNTHCTL_EL1PCTEN;
	write_sysreg(val, cnthctl_el2);

	if (kvm->arch.timer.enabled) {
		write_sysreg(kvm->arch.timer.cntvoff, cntvoff_el2);
		write_sysreg(timer->cntv_cval, cntv_cval_el0);
		isb();
		write_sysreg(timer->cntv_ctl, cntv_ctl_el0);
	}
}
Example #20
static inline u32 armv8pmu_getreset_flags(void)
{
	u32 value;

	/* Read */
	value = read_sysreg(pmovsclr_el0);

	/* Write to clear flags */
	value &= ARMV8_PMU_OVSR_MASK;
	write_sysreg(value, pmovsclr_el0);

	return value;
}
Example #21
void __hyp_text disable_el1_phys_timer_access(void)
{
	u64 val;

	/*
	 * Disallow physical timer access for the guest
	 * Physical counter access is allowed
	 */
	val = read_sysreg(cnthctl_el2);
	val &= ~CNTHCTL_EL1PCEN;
	val |= CNTHCTL_EL1PCTEN;
	write_sysreg(val, cnthctl_el2);
}
Example #22
void vcpu_write_spsr32(struct kvm_vcpu *vcpu, unsigned long v)
{
	int spsr_idx = vcpu_spsr32_mode(vcpu);

	if (!vcpu->arch.sysregs_loaded_on_cpu) {
		vcpu_gp_regs(vcpu)->spsr[spsr_idx] = v;
		return;
	}

	switch (spsr_idx) {
	case KVM_SPSR_SVC:
		write_sysreg_el1(v, spsr);
		break;
	case KVM_SPSR_ABT:
		write_sysreg(v, spsr_abt);
		break;
	case KVM_SPSR_UND:
		write_sysreg(v, spsr_und);
		break;
	case KVM_SPSR_IRQ:
		write_sysreg(v, spsr_irq);
		break;
	case KVM_SPSR_FIQ:
		write_sysreg(v, spsr_fiq);
		break;
	}
}
Example #23
static inline void armv8pmu_write_counter(struct perf_event *event, u64 value)
{
	struct arm_pmu *cpu_pmu = to_arm_pmu(event->pmu);
	struct hw_perf_event *hwc = &event->hw;
	int idx = hwc->idx;

	if (!armv8pmu_counter_valid(cpu_pmu, idx))
		pr_err("CPU%u writing wrong counter %d\n",
			smp_processor_id(), idx);
	else if (idx == ARMV8_IDX_CYCLE_COUNTER) {
		/*
		 * The cycles counter is really a 64-bit counter.
		 * When treating it as a 32-bit counter, we only count
		 * the lower 32 bits, and set the upper 32-bits so that
		 * we get an interrupt upon 32-bit overflow.
		 */
		if (!armv8pmu_event_is_64bit(event))
			value |= 0xffffffff00000000ULL;
		write_sysreg(value, pmccntr_el0);
	} else
		armv8pmu_write_hw_counter(event, value);
}
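The pre-load trick in both 32-bit paths above (Examples #17 and #23): PMCCNTR_EL0 is a 64-bit counter whose overflow interrupt fires on 64-bit wrap-around, so setting the upper 32 bits to all-ones makes that wrap coincide with the 32-bit wrap of the value being counted. A standalone check of the arithmetic:

#include <stdint.h>
#include <stdio.h>

int main(void)
{
	uint32_t value = 0xfffffff0;	/* the 32-bit count being programmed */
	uint64_t pmccntr = 0xffffffff00000000ULL | value;

	/* 2^32 - value = 16 increments wrap the lower 32 bits, and the
	 * pre-loaded upper bits make the full 64-bit counter wrap (and
	 * raise its overflow interrupt) at exactly the same moment. */
	pmccntr += 0x100000000ULL - value;
	printf("counter after 16 increments: 0x%016llx\n",
	       (unsigned long long)pmccntr);	/* prints all zeroes */
	return 0;
}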
Example #24
void __hyp_text __sysreg32_restore_state(struct kvm_vcpu *vcpu)
{
	u64 *spsr, *sysreg;

	if (read_sysreg(hcr_el2) & HCR_RW)
		return;

	spsr = vcpu->arch.ctxt.gp_regs.spsr;
	sysreg = vcpu->arch.ctxt.sys_regs;

	write_sysreg(spsr[KVM_SPSR_ABT], spsr_abt);
	write_sysreg(spsr[KVM_SPSR_UND], spsr_und);
	write_sysreg(spsr[KVM_SPSR_IRQ], spsr_irq);
	write_sysreg(spsr[KVM_SPSR_FIQ], spsr_fiq);

	write_sysreg(sysreg[DACR32_EL2], dacr32_el2);
	write_sysreg(sysreg[IFSR32_EL2], ifsr32_el2);

	if (vcpu->arch.debug_flags & KVM_ARM64_DEBUG_DIRTY)
		write_sysreg(sysreg[DBGVCR32_EL2], dbgvcr32_el2);
}
Example #25
static inline void prlar_write(u32 v)
{
	write_sysreg(v, PRLAR);
}
Example #26
static inline void prbar_write(u32 v)
{
	write_sysreg(v, PRBAR);
}
Example #27
static inline void prsel_write(u32 v)
{
	write_sysreg(v, PRSEL);
}
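Examples #25 to #27 each wrap a single MPU register; programming a PMSAv8 protection region uses all three together: select the region with PRSEL, then give it a base and attributes through PRBAR and a limit plus enable bit through PRLAR. A hedged sketch of that sequence (PRBAR_ATTRS and PRLAR_EN are hypothetical placeholders for the real shareability/permission and enable encodings):

/* Sketch only: program MPU region `num` to span [base, limit]. */
static void pmsav8_setup_region(u32 num, u32 base, u32 limit)
{
	prsel_write(num);			/* select the region */
	isb();					/* selection must be visible first */
	prbar_write(base | PRBAR_ATTRS);	/* hypothetical attribute bits */
	prlar_write(limit | PRLAR_EN);		/* hypothetical enable bit */
}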
Example #28
static inline int armv8pmu_enable_intens(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmintenset_el1);
	return idx;
}
Example #29
static inline int armv8pmu_disable_counter(int idx)
{
	u32 counter = ARMV8_IDX_TO_COUNTER(idx);
	write_sysreg(BIT(counter), pmcntenclr_el0);
	return idx;
}
Example #30
static inline void armv8pmu_pmcr_write(u32 val)
{
	val &= ARMV8_PMU_PMCR_MASK;
	isb();
	write_sysreg(val, pmcr_el0);
}