Example #1
int
elf_cpu_load_file(linker_file_t lf)
{

	/*
	 * The pmap code does not do an icache sync upon establishing executable
	 * mappings in the kernel pmap.  It's an optimization based on the fact
	 * that kernel memory allocations always have EXECUTABLE protection even
	 * when the memory isn't going to hold executable code.  The only time
	 * kernel memory holding instructions does need a sync is after loading
	 * a kernel module, and that's when this function gets called.
	 *
	 * This syncs data and instruction caches after loading a module.  We
	 * don't worry about the kernel itself (lf->id is 1) as locore.S did
	 * that on entry.  Even if data cache maintenance was done by IO code,
	 * the relocation fixup process creates dirty cache entries that we must
	 * write back before doing icache sync. The instruction cache sync also
	 * invalidates the branch predictor cache on platforms that have one.
	 */
	if (lf->id == 1)
		return (0);
#if __ARM_ARCH >= 6
	dcache_wb_pou((vm_offset_t)lf->address, (vm_size_t)lf->size);
	icache_inv_all();
#else
	cpu_dcache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_l2cache_wb_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
	cpu_icache_sync_range((vm_offset_t)lf->address, (vm_size_t)lf->size);
#endif
	return (0);
}
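The same ordering constraint (write the new instructions back through the data cache, then invalidate stale instruction-cache lines) applies whenever code is stored through the data side and executed afterwards. Below is a minimal user-space sketch, not part of the kernel example above, using the GCC/Clang builtin __builtin___clear_cache(), which performs the equivalent range maintenance; the install_code() helper and its RWX mapping are hypothetical stand-ins for a JIT-style code buffer.

#include <stddef.h>
#include <string.h>
#include <sys/mman.h>

/*
 * Hypothetical helper: copy freshly generated machine code into an
 * executable buffer and make it coherent for instruction fetch.
 * __builtin___clear_cache() is the portable GCC/Clang way to get the
 * "write back D-cache, invalidate I-cache" work done for a range.
 */
static void *
install_code(const void *code, size_t len)
{
	void *buf = mmap(NULL, len, PROT_READ | PROT_WRITE | PROT_EXEC,
	    MAP_PRIVATE | MAP_ANONYMOUS, -1, 0);

	if (buf == MAP_FAILED)
		return (NULL);

	memcpy(buf, code, len);		/* dirties D-cache lines */
	__builtin___clear_cache((char *)buf, (char *)buf + len);

	return (buf);			/* safe to branch to now */
}

A caller would pass real machine code for the target architecture; without the __builtin___clear_cache() call, a core with separate instruction and data caches may fetch stale bytes from the new buffer.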
Example #2
int imx7d_cpuidle_init(void)
{
	uint32_t lpm_idle_ocram_base =
		core_mmu_get_va(TRUSTZONE_OCRAM_START +
				LOWPOWER_IDLE_OCRAM_OFFSET,
				MEM_AREA_TEE_COHERENT);
	struct imx7_pm_info *p =
		(struct imx7_pm_info *)lpm_idle_ocram_base;

	pm_imx7_iram_tbl_init();

	/* Clean and invalidate the level 1 data cache. */
	dcache_op_level1(DCACHE_OP_CLEAN_INV);

	/*
	 * Fill in the pm_info block at the start of the OCRAM region with
	 * the virtual and physical base addresses of the DDRC, CCM, ANATOP,
	 * SRC, IOMUXC GPR, GPC and GIC blocks.
	 */
	p->va_base = lpm_idle_ocram_base;
	p->pa_base = TRUSTZONE_OCRAM_START + LOWPOWER_IDLE_OCRAM_OFFSET;
	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
	p->pm_info_size = sizeof(*p);
	p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC);
	p->ddrc_pa_base = DDRC_BASE;
	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC);
	p->ccm_pa_base = CCM_BASE;
	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC);
	p->anatop_pa_base = ANATOP_BASE;
	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC);
	p->src_pa_base = SRC_BASE;
	p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE,
						MEM_AREA_IO_SEC);
	p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE;
	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC);
	p->gpc_pa_base = GPC_BASE;
	p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC);
	p->gic_pa_base = GIC_BASE;

	p->num_lpi_cpus = 0;
	p->num_online_cpus = -1;

	/* Copy imx7d_low_power_idle into OCRAM, right after the pm_info block. */
	memcpy((void *)(lpm_idle_ocram_base + sizeof(*p)),
	       (void *)(vaddr_t)imx7d_low_power_idle,
	       LOWPOWER_IDLE_OCRAM_SIZE - sizeof(*p));

	/* Write the pm_info block and the copied code back to OCRAM. */
	dcache_clean_range((void *)lpm_idle_ocram_base,
			   LOWPOWER_IDLE_OCRAM_SIZE);
	/*
	 * Invalidate the instruction cache so the copied code is fetched
	 * from memory.  Note that if the IRAM IOSEC mapping is changed to
	 * a MEM mapping, the cache must be flushed.
	 */
	icache_inv_all();

	return 0;
}
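Example #2 follows a common pattern: place a parameter block at the start of an on-chip SRAM window, copy a position-independent routine right after it, then clean the data cache for that range and invalidate the instruction cache before anything branches to the copy. The sketch below restates that pattern with hypothetical names (struct sram_params, install_sram_routine()); only dcache_clean_range() and icache_inv_all() are taken from the example itself.

#include <stddef.h>
#include <stdint.h>
#include <string.h>

/* Cache maintenance primitives as used in the example above. */
void dcache_clean_range(void *va, size_t len);
void icache_inv_all(void);

/* Hypothetical parameter block handed to the relocated routine. */
struct sram_params {
	uintptr_t pa_base;	/* physical base of the SRAM window */
	uintptr_t resume_pa;	/* physical resume entry point */
};

/*
 * Hypothetical sketch: install a position-independent routine in SRAM.
 * Returns the address to branch to once the caches are consistent.
 */
static void *
install_sram_routine(void *sram_va, uintptr_t sram_pa,
		     const void *routine, size_t routine_size,
		     uintptr_t resume_pa)
{
	struct sram_params *p = sram_va;
	uint8_t *dst = (uint8_t *)sram_va + sizeof(*p);

	p->pa_base = sram_pa;
	p->resume_pa = resume_pa;

	/* The routine must be position independent: it will run from SRAM. */
	memcpy(dst, routine, routine_size);

	/* Push the new contents out of the D-cache... */
	dcache_clean_range(sram_va, sizeof(*p) + routine_size);
	/* ...and drop any stale lines the I-cache may hold for that code. */
	icache_inv_all();

	return dst;
}

The order matters: the data-cache clean must complete before the instruction-cache invalidate is useful, and only after both may anything branch into the copied routine, which mirrors the sequence at the end of Example #2.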