Example no. 1
void __cpuinit init_traps(void)
{
    /* Setup Hyp vector base */
    WRITE_SYSREG((vaddr_t)hyp_traps_vector, VBAR_EL2);

    /* Setup hypervisor traps */
    WRITE_SYSREG(HCR_PTW|HCR_BSU_OUTER|HCR_AMO|HCR_IMO|HCR_VM|HCR_TWI|HCR_TSC|
                 HCR_TAC, HCR_EL2);
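    /* Ensure the new vector base and trap configuration take effect before continuing */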
    isb();
}
Example no. 2
void stop_cpu(void)
{
    local_irq_disable();
    cpu_is_dead = 1;
    /* Make sure the write happens before we sleep forever */
    dsb(sy);
    isb();
    while ( 1 )
        wfi();
}
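For context, the dsb(sy) matters because another CPU is expected to observe the flag; a hypothetical observer (not from the same source, assuming Linux-style READ_ONCE()/cpu_relax()) could be as simple as:

/*
 * Hypothetical waiter on another CPU: stop_cpu()'s dsb(sy) guarantees the
 * cpu_is_dead store is visible before the dying CPU parks itself in wfi().
 */
static void wait_for_cpu_dead(void)
{
	while (!READ_ONCE(cpu_is_dead))
		cpu_relax();
}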
Example no. 3
/*
 * Counter Clear Enable Register
 */
static __inline void
arm64_counter_disable(unsigned int pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMCNTENCLR_EL0, reg);

	isb();
}
Example no. 4
/*
 * Interrupt Enable Set Register
 */
static __inline void
arm64_interrupt_enable(uint32_t pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMINTENSET_EL1, reg);

	isb();
}
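These two helpers have obvious counterparts; a minimal sketch of what they would look like under the same FreeBSD conventions (WRITE_SPECIALREG(), isb()), using the architectural PMCNTENSET_EL0 and PMINTENCLR_EL1 registers. The names arm64_counter_enable/arm64_interrupt_disable are illustrative here:

/*
 * Counter Enable Set Register (illustrative counterpart)
 */
static __inline void
arm64_counter_enable(unsigned int pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMCNTENSET_EL0, reg);

	isb();
}

/*
 * Interrupt Enable Clear Register (illustrative counterpart)
 */
static __inline void
arm64_interrupt_disable(uint32_t pmc)
{
	uint32_t reg;

	reg = (1 << pmc);
	WRITE_SPECIALREG(PMINTENCLR_EL1, reg);

	isb();
}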
Example no. 5
int vfp_flush_context(void)
{
	unsigned long flags;
	struct thread_info *ti;
	u32 fpexc;
	u32 cpu;
	int saved = 0;

	local_irq_save(flags);

	ti = current_thread_info();
	fpexc = fmrx(FPEXC);
	cpu = ti->cpu;

#ifdef CONFIG_SMP
	/* On SMP, if VFP is enabled, save the old state */
	if ((fpexc & FPEXC_EN) && vfp_current_hw_state[cpu]) {
		vfp_current_hw_state[cpu]->hard.cpu = cpu;
#else
	/* If there is a VFP context we must save it. */
	if (vfp_current_hw_state[cpu]) {
		/* Enable VFP so we can save the old state. */
		fmxr(FPEXC, fpexc | FPEXC_EN);
		isb();
#endif
		vfp_save_state(vfp_current_hw_state[cpu], fpexc);

		/* disable, just in case */
		fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
		saved = 1;
	} else if (vfp_current_hw_state[ti->cpu]) {
#ifndef CONFIG_SMP
		fmxr(FPEXC, fpexc | FPEXC_EN);
		vfp_save_state(vfp_current_hw_state[ti->cpu], fpexc);
		fmxr(FPEXC, fpexc);
#endif
	}
	vfp_current_hw_state[cpu] = NULL;

	/* clear any information we had about last context state */
	vfp_current_hw_state[ti->cpu] = NULL;

	local_irq_restore(flags);

	return saved;
}

void vfp_reinit(void)
{
	/* ensure we have access to the vfp */
	vfp_enable(NULL);

	/* and disable it to ensure the next usage restores the state */
	fmxr(FPEXC, fmrx(FPEXC) & ~FPEXC_EN);
}
Example no. 6
static int mvgbe_send(struct eth_device *dev, void *dataptr,
		      int datasize)
{
	struct mvgbe_device *dmvgbe = to_mvgbe(dev);
	struct mvgbe_registers *regs = dmvgbe->regs;
	struct mvgbe_txdesc *p_txdesc = dmvgbe->p_txdesc;
	void *p = (void *)dataptr;
	u32 cmd_sts;
	u32 txuq0_reg_addr;

	/* Copy buffer if it's misaligned */
	if ((u32) dataptr & 0x07) {
		if (datasize > PKTSIZE_ALIGN) {
			printf("Non-aligned data too large (%d)\n",
					datasize);
			return -1;
		}

		memcpy(dmvgbe->p_aligned_txbuf, p, datasize);
		p = dmvgbe->p_aligned_txbuf;
	}

	p_txdesc->cmd_sts = MVGBE_ZERO_PADDING | MVGBE_GEN_CRC;
	p_txdesc->cmd_sts |= MVGBE_TX_FIRST_DESC | MVGBE_TX_LAST_DESC;
	p_txdesc->cmd_sts |= MVGBE_BUFFER_OWNED_BY_DMA;
	p_txdesc->cmd_sts |= MVGBE_TX_EN_INTERRUPT;
	p_txdesc->buf_ptr = (u8 *) p;
	p_txdesc->byte_cnt = datasize;

	/* Set this tx desc as zeroth TXUQ */
	txuq0_reg_addr = (u32)&regs->tcqdp[TXUQ];
	writel((u32) p_txdesc, txuq0_reg_addr);

	/* ensure tx desc writes above are performed before we start Tx DMA */
	isb();

	/* Apply send command using zeroth TXUQ */
	MVGBE_REG_WR(regs->tqc, (1 << TXUQ));

	/*
	 * wait for packet xmit completion
	 */
	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		/* return fail if error is detected */
		if ((cmd_sts & (MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME)) ==
				(MVGBE_ERROR_SUMMARY | MVGBE_TX_LAST_FRAME) &&
				cmd_sts & (MVGBE_UR_ERROR | MVGBE_RL_ERROR)) {
			printf("Err..(%s) in xmit packet\n", __FUNCTION__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}
	return 0;
}
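One thing worth noting: the completion loop above spins forever if the controller never returns descriptor ownership. A bounded variant of that loop (error check elided for brevity) as a sketch only; it assumes U-Boot's get_timer() millisecond timer and a hypothetical MVGBE_TX_TIMEOUT_MS constant:

	/* Sketch: bounded wait for Tx completion (MVGBE_TX_TIMEOUT_MS is hypothetical) */
	ulong start = get_timer(0);

	cmd_sts = readl(&p_txdesc->cmd_sts);
	while (cmd_sts & MVGBE_BUFFER_OWNED_BY_DMA) {
		if (get_timer(start) > MVGBE_TX_TIMEOUT_MS) {
			printf("%s: Tx completion timeout\n", __func__);
			return -1;
		}
		cmd_sts = readl(&p_txdesc->cmd_sts);
	}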
Example no. 7
static
void zynq_cpu1_init(void)
{
#if 0
	unsigned long r;
	unsigned long orig_reset;
	unsigned long loop;
	unsigned long ctrl;

	/* Initialize Snoop Control Unit */
	ctrl = mmio_readl(ZYNQ_SCU_PHYS_BASE + SCU_CONTROL_0);
	ctrl |= 1;
	mmio_writel(ctrl, ZYNQ_SCU_PHYS_BASE + SCU_CONTROL_0);

	/* Set boot entry */
	mmio_writel(virt_to_phys(secondary_startup),
	IO_ADDRESS(TEGRA_EXCEPTION_VECTORS_BASE) + EVP_CPU_RESET_VECTOR_0);

	dsb();
	isb();

	/* Halt CPU */
	mmio_writel(0, IO_ADDRESS(TEGRA_FLOW_CTRL_BASE) + FLOW_CTRL_HALT_CPUx_EVENTS(1));

	dsb();
	isb();

	/* CPU Clock Stop */
	r = mmio_readl(IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0);
	r &= ~CPU_CLK_STOP(1);
	mmio_writel(r, IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RST_CONTROLLER_CLK_CPU_CMPLX_0);

	dsb();
	isb();

	/* Restart Slave CPU */
	mmio_writel(CPU_RESET(1), IO_ADDRESS(TEGRA_CLK_RESET_BASE) + CLK_RST_CONTROLLER_RST_CPU_CMPLX_CLR_0);

	dsb();
	isb();
#endif
}
Example no. 8
static void etm_clr_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	etmcr = etm_readl(drvdata, ETMCR);
	etmcr &= ~ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
Example no. 9
static void
arm64_pmcn_write(unsigned int pmc, uint32_t reg)
{

	KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

	WRITE_SPECIALREG(PMSELR_EL0, pmc);
	WRITE_SPECIALREG(PMXEVCNTR_EL0, reg);

	isb();
}
Example no. 10
static void etm_set_pwrdwn(struct etm_drvdata *drvdata)
{
	u32 etmcr;

	/* Ensure pending cp14 accesses complete before setting pwrdwn */
	mb();
	isb();
	etmcr = etm_readl(drvdata, ETMCR);
	etmcr |= ETMCR_PWD_DWN;
	etm_writel(drvdata, etmcr, ETMCR);
}
Example no. 11
void restore_cpu_source_ctrl(void)
{
	u32 reg_val;
	reg_val = smc_readl(CCM_CPU_SOURCECTRL);
	reg_val |= 1;
	smc_writel(reg_val, CCM_CPU_SOURCECTRL);

	__usdelay(1000);
	dmb();
	isb();
}
Example no. 12
void arch_restore_coreinfo(void)
{
#ifdef CONFIG_CORESIGHT_TRACE_SUPPORT
	if (etm_need_save_restore()) {
		coresight_etm_restore();
		coresight_local_etf_restore();
		dsb();
		isb();
	}
#endif
}
Example no. 13
void modify_cpu_source_ctrl(void)
{
	u32 reg_val;
	reg_val = smc_readl(CCM_CPU_SOURCECTRL);
	reg_val &= ~1;

	smc_writel(reg_val, CCM_CPU_SOURCECTRL);
	__usdelay(10);
	dmb();
	isb();
}
Example no. 14
static void etm_set_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr |= ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
	/* Ensure pwrup completes before subsequent cp14 accesses */
	mb();
	isb();
}
Example no. 15
int __asm_flush_l3_cache(void)
{
    struct pt_regs regs = {0};

    isb();

    regs.regs[0] = SMC_SIP_INVOKE_MCE | MCE_SMC_ROC_FLUSH_CACHE;
    smc_call(&regs);

    return 0;
}
Example no. 16
static void etm_clr_pwrup(struct etm_drvdata *drvdata)
{
	u32 etmpdcr;

	/* Ensure pending cp14 accesses complete before clearing pwrup */
	mb();
	isb();
	etmpdcr = readl_relaxed(drvdata->base + ETMPDCR);
	etmpdcr &= ~ETMPDCR_PWD_UP;
	writel_relaxed(etmpdcr, drvdata->base + ETMPDCR);
}
Example no. 17
static void release_secondary_early_hpen(size_t pos)
{
	uint32_t *p_entry = bckreg_address(BCKR_CORE1_BRANCH_ADDRESS);
	uint32_t *p_magic = bckreg_address(BCKR_CORE1_MAGIC_NUMBER);

	*p_entry = TEE_LOAD_ADDR;
	*p_magic = BOOT_API_A7_CORE1_MAGIC_NUMBER;

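	/* Make the entry address and magic number observable before waking core 1 */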
	dmb();
	isb();
	itr_raise_sgi(GIC_SEC_SGI_0, BIT(pos));
}
Example no. 18
static void __hyp_text __tlb_switch_to_host_vhe(struct kvm *kvm,
						unsigned long flags)
{
	/*
	 * We're done with the TLB operation, let's restore the host's
	 * view of HCR_EL2.
	 */
	write_sysreg(0, vttbr_el2);
	write_sysreg(HCR_HOST_VHE_FLAGS, hcr_el2);
	isb();
	local_irq_restore(flags);
}
Example no. 19
static void write_wb_reg(int reg, int n, u64 val)
{
	switch (reg + n) {
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BVR, AARCH64_DBG_REG_NAME_BVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_BCR, AARCH64_DBG_REG_NAME_BCR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WVR, AARCH64_DBG_REG_NAME_WVR, val);
	GEN_WRITE_WB_REG_CASES(AARCH64_DBG_REG_WCR, AARCH64_DBG_REG_NAME_WCR, val);
	default:
		pr_warning("attempt to write to unknown breakpoint register %d\n", n);
	}
	isb();
}
Example no. 20
/*
 * The Cache Size Selection Register (CSSELR) selects which Cache Size ID
 * Register (CCSIDR) is accessible, by specifying the required cache level
 * and the cache type. We need to ensure that no one else changes CSSELR
 * while we use it, so this must be called in a non-preemptible context.
 */
u64 __attribute_const__ cache_get_ccsidr(u64 csselr)
{
	u64 ccsidr;

	WARN_ON(preemptible());

	write_sysreg(csselr, csselr_el1);
	isb();
	ccsidr = read_sysreg(ccsidr_el1);

	return ccsidr;
}
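A short usage sketch (illustrative, not from the same file): CSSELR encodes the cache level in bits [3:1] and instruction-vs-data in bit [0], and CCSIDR_EL1.LineSize (bits [2:0]) encodes log2(line bytes) - 4, so a caller running non-preemptibly can derive the data-cache line size at a given level like this:

/* Illustrative helper: data-cache line size in bytes at a 1-based cache level.
 * Must be called with preemption disabled, as cache_get_ccsidr() requires. */
static unsigned int dcache_line_size_at_level(unsigned int level)
{
	u64 csselr = (u64)(level - 1) << 1;	/* bit 0 clear: data/unified cache */
	u64 ccsidr = cache_get_ccsidr(csselr);

	return 1U << ((ccsidr & 0x7) + 4);	/* LineSize field: log2(bytes) - 4 */
}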
Example no. 21
static void __noreturn stm32_pm_cpu_power_down_wfi(void)
{
	dcache_op_level1(DCACHE_OP_CLEAN);

	io_write32(stm32_rcc_base() + RCC_MP_GRSTCSETR,
		   RCC_MP_GRSTCSETR_MPUP1RST);

	dsb();
	isb();
	wfi();
	panic();
}
Example no. 22
/*
 * Performance Count Register N
 */
static uint32_t
arm64_pmcn_read(unsigned int pmc)
{

	KASSERT(pmc < arm64_npmcs, ("%s: illegal PMC number %d", __func__, pmc));

	WRITE_SPECIALREG(PMSELR_EL0, pmc);

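	/* Synchronize the PMSELR_EL0 selection before reading PMXEVCNTR_EL0 */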
	isb();

	return (READ_SPECIALREG(PMXEVCNTR_EL0));
}
Example no. 23
void __hyp_text __kvm_tlb_flush_vmid_ipa(struct kvm *kvm, phys_addr_t ipa)
{
	unsigned long flags;

	dsb(ishst);

	/* Switch to requested VMID */
	kvm = kern_hyp_va(kvm);
	__tlb_switch_to_guest()(kvm, &flags);

	/*
	 * We could do so much better if we had the VA as well.
	 * Instead, we invalidate Stage-2 for this IPA, and the
	 * whole of Stage-1. Weep...
	 */
	ipa >>= 12;
	__tlbi(ipas2e1is, ipa);

	/*
	 * We have to ensure completion of the invalidation at Stage-2,
	 * since a table walk on another CPU could refill a TLB with a
	 * complete (S1 + S2) walk based on the old Stage-2 mapping if
	 * the Stage-1 invalidation happened first.
	 */
	dsb(ish);
	__tlbi(vmalle1is);
	dsb(ish);
	isb();

	/*
	 * If the host is running at EL1 and we have a VPIPT I-cache,
	 * then we must perform I-cache maintenance at EL2 in order for
	 * it to have an effect on the guest. Since the guest cannot hit
	 * I-cache lines allocated with a different VMID, we don't need
	 * to worry about junk out of guest reset (we nuke the I-cache on
	 * VMID rollover), but we do need to be careful when remapping
	 * executable pages for the same guest. This can happen when KSM
	 * takes a CoW fault on an executable page, copies the page into
	 * a page that was previously mapped in the guest and then needs
	 * to invalidate the guest view of the I-cache for that page
	 * from EL1. To solve this, we invalidate the entire I-cache when
	 * unmapping a page from a guest if we have a VPIPT I-cache but
	 * the host is running at EL1. As above, we could do better if
	 * we had the VA.
	 *
	 * The moral of this story is: if you have a VPIPT I-cache, then
	 * you should be running with VHE enabled.
	 */
	if (!has_vhe() && icache_is_vpipt())
		__flush_icache_all();

	__tlb_switch_to_host()(kvm, flags);
}
Example no. 24
/**
* @brief	Set the memory attributes for a region of memory by programming
*		the next available MPU region.
*
* @param	addr: 32-bit start address of the region.
* @param	size: Size of the region in bytes.
* @param	attrib: Memory attributes for the region.
* @return	XST_SUCCESS on success, XST_FAILURE if no MPU region is available.
*
******************************************************************************/
u32 Xil_SetMPURegion(INTPTR addr, u64 size, u32 attrib)
{
	u32 Regionsize = 0;
	INTPTR Localaddr = addr;
	u32 NextAvailableMemRegion;
	unsigned int i;

	NextAvailableMemRegion = Xil_GetNextMPURegion();
	if (NextAvailableMemRegion == 0xFF) {
		xdbg_printf(DEBUG, "No regions available\r\n");
		return XST_FAILURE;
	}

	Xil_DCacheFlush();
	Xil_ICacheInvalidate();

	mtcp(XREG_CP15_MPU_MEMORY_REG_NUMBER,NextAvailableMemRegion);
	isb();

	/* Lookup the size.  */
	for (i = 0; i < sizeof region_size / sizeof region_size[0]; i++) {
		if (size <= region_size[i].size) {
			Regionsize = region_size[i].encoding;
			break;
		}
	}

	Localaddr &= ~(region_size[i].size - 1);

	Regionsize <<= 1;
	Regionsize |= REGION_EN;
	dsb();
	mtcp(XREG_CP15_MPU_REG_BASEADDR, Localaddr);	/* Set base address of a region */
	mtcp(XREG_CP15_MPU_REG_ACCESS_CTRL, attrib);	/* Set the control attribute */
	mtcp(XREG_CP15_MPU_REG_SIZE_EN, Regionsize);	/* set the region size and enable it*/
	dsb();
	isb();
	Xil_UpdateMPUConfig(NextAvailableMemRegion, Localaddr, Regionsize, attrib);
	return XST_SUCCESS;
}
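The size lookup above relies on a region_size[] table that maps byte sizes to the Cortex-R MPU size-field encodings (in the PMSA, a size-field value n selects a 2^(n+1)-byte region, 32 bytes minimum; the function then shifts the encoding into bits [5:1] of the Region Size and Enable Register). A sketch of the shape such a table takes; the exact entries in the Xilinx BSP may differ:

/* Illustrative lookup table: region size in bytes -> RSR size-field encoding */
static const struct {
	u64 size;
	u32 encoding;	/* region spans 2^(encoding + 1) bytes */
} region_size[] = {
	{ 0x20U,           4U },	/* 32 B (smallest supported region) */
	{ 0x40U,           5U },	/* 64 B */
	/* ... one entry per power of two ... */
	{ 0x100000000ULL, 31U },	/* 4 GB */
};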
Example no. 25
void shutdown_el2(struct registers *regs, unsigned long vectors)
{
	u32 sctlr_el2;

	/* Disable stage-1 translation, caches must be cleaned. */
	arm_read_sysreg(SCTLR_EL2, sctlr_el2);
	sctlr_el2 &= ~(SCTLR_M_BIT | SCTLR_C_BIT | SCTLR_I_BIT);
	arm_write_sysreg(SCTLR_EL2, sctlr_el2);
	isb();

	/* Clean the MMU registers */
	arm_write_sysreg(HMAIR0, 0);
	arm_write_sysreg(HMAIR1, 0);
	arm_write_sysreg(TTBR0_EL2, 0);
	arm_write_sysreg(TCR_EL2, 0);
	isb();

	/* Reset the vectors as late as possible */
	arm_write_sysreg(HVBAR, vectors);

	vmreturn(regs);
}
Example no. 26
File: p2m.c Project: Marshalzxy/xen
void p2m_restore_state(struct vcpu *n)
{
    register_t hcr;

    hcr = READ_SYSREG(HCR_EL2);
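    /* Temporarily disable stage-2 translation while the VTTBR is switched */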
    WRITE_SYSREG(hcr & ~HCR_VM, HCR_EL2);
    isb();

    p2m_load_VTTBR(n->domain);
    isb();

    if ( is_32bit_domain(n->domain) )
        hcr &= ~HCR_RW;
    else
        hcr |= HCR_RW;

    WRITE_SYSREG(n->arch.sctlr, SCTLR_EL1);
    isb();

    WRITE_SYSREG(hcr, HCR_EL2);
    isb();
}
Example no. 27
/*****************************************************************************
*
* Enable a background region in the MPU with default memory attributes for the
* Cortex-R5 processor.
*
* @param	None.
*
* @return	None.
*
*
******************************************************************************/
static void Xil_EnableBackgroundRegion(void)
{
	u32 CtrlReg, Reg;

	mtcp(XREG_CP15_INVAL_BRANCH_ARRAY, 0);
	Reg = mfcp(XREG_CP15_SYS_CONTROL);
	Reg |= (0x00000001U << 17U);
	dsb();
	mtcp(XREG_CP15_SYS_CONTROL, Reg);
	isb();
}
Example no. 28
/*****************************************************************************
*
* Invalidate the caches, then enable the MMU and D-cache for the Cortex-A53 processor.
*
* @param	None.
* @return	None.
*
******************************************************************************/
void Xil_EnableMMU(void)
{
	u32 Reg;
	Xil_DCacheInvalidate();
	Xil_ICacheInvalidate();

	Reg = mfcp(XREG_CP15_SYS_CONTROL);
	Reg |= (u32)0x05U;
	mtcp(XREG_CP15_SYS_CONTROL, Reg);

	dsb();
	isb();
}
Example no. 29
static void coresight_cti_restore(void)
{
	struct cti_info *p_cti_info;
	p_cti_info = &per_cpu(cpu_cti_info, smp_processor_id());

	cti_enable_access();
	writel_relaxed(p_cti_info->cti_ctrl, CTI_REG(0x0));
	writel_relaxed(p_cti_info->cti_en_in1, CTI_REG(0x24));
	writel_relaxed(p_cti_info->cti_en_out6, CTI_REG(0xB8));

	dsb();
	isb();
}
Example no. 30
void cpuamu_context_restore(unsigned int nr_counters)
{
	struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
	unsigned int i;

	assert(nr_counters <= CPUAMU_NR_COUNTERS);

	/*
	 * Disable counters.  They were enabled early in the
	 * CPU reset function.
	 */
	cpuamu_write_cpuamcntenclr_el0(ctx->mask);
	isb();

	/* Restore counters */
	for (i = 0; i < nr_counters; i++)
		cpuamu_cnt_write(i, ctx->cnts[i]);
	isb();

	/* Restore counter configuration */
	cpuamu_write_cpuamcntenset_el0(ctx->mask);
}
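The restore path only makes sense alongside a matching save; a sketch of that counterpart under the same TF-A conventions, assuming read accessors (cpuamu_read_cpuamcntenset_el0(), cpuamu_cnt_read()) symmetric to the write helpers used above:

void cpuamu_context_save(unsigned int nr_counters)
{
	struct cpuamu_ctx *ctx = &cpuamu_ctxs[plat_my_core_pos()];
	unsigned int i;

	assert(nr_counters <= CPUAMU_NR_COUNTERS);

	/* Remember which counters were enabled, then stop them */
	ctx->mask = cpuamu_read_cpuamcntenset_el0();
	cpuamu_write_cpuamcntenclr_el0(ctx->mask);
	isb();

	/* Snapshot the counter values */
	for (i = 0; i < nr_counters; i++)
		ctx->cnts[i] = cpuamu_cnt_read(i);
}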