/*
 * swsusp_arch_suspend - arch-level entry point for creating or resuming
 * from a hibernation image.
 *
 * Return: 0 on success, the result of swsusp_save() on the image-creation
 * path, or -EBUSY when secondary CPUs cannot be offlined.
 *
 * The two branches of the __cpu_suspend_enter() test run in different
 * kernel instances: the "true" branch is the kernel that is about to save
 * the image; the "false" branch is reached after the saved memory image
 * has been restored over the running kernel (see the in_suspend = 0
 * comment below).
 */
int swsusp_arch_suspend(void)
{
	int ret = 0;
	unsigned long flags;
	struct sleep_stack_data state;

	if (cpus_are_stuck_in_kernel()) {
		pr_err("Can't hibernate: no mechanism to offline secondary CPUs.\n");
		return -EBUSY;
	}

	/* Exception/interrupt state is saved here and restored below. */
	flags = local_daif_save();

	if (__cpu_suspend_enter(&state)) {
		/* make the crash dump kernel image visible/saveable */
		crash_prepare_suspend();

		/* Record which CPU hibernated so resume can match it. */
		sleep_cpu = smp_processor_id();
		ret = swsusp_save();
	} else {
		/* Clean kernel core startup/idle code to PoC*/
		dcache_clean_range(__mmuoff_data_start, __mmuoff_data_end);
		dcache_clean_range(__idmap_text_start, __idmap_text_end);

		/* Clean kvm setup code to PoC? */
		if (el2_reset_needed())
			dcache_clean_range(__hyp_idmap_text_start, __hyp_idmap_text_end);

		/* make the crash dump kernel image protected again */
		crash_post_resume();

		/*
		 * Tell the hibernation core that we've just restored
		 * the memory
		 */
		in_suspend = 0;

		/* Invalidate the saved-CPU marker; hibernation is over. */
		sleep_cpu = -EINVAL;
		__cpu_suspend_exit();
	}

	local_daif_restore(flags);

	return ret;
}
/*
 * imx7d_cpuidle_init - stage the i.MX7D low-power-idle handler in OCRAM.
 *
 * Lays out a struct imx7_pm_info at the start of the secure OCRAM window,
 * fills it with the virtual/physical base addresses of every peripheral
 * the relocated idle code touches, then copies the idle routine itself
 * (imx7d_low_power_idle) into OCRAM immediately after the struct.
 *
 * NOTE(review): the struct layout and field order are a contract with the
 * relocated (assembly) idle code — do not reorder assignments without
 * checking that consumer.
 *
 * Return: always 0.
 */
int imx7d_cpuidle_init(void)
{
	/* VA of the OCRAM region reserved for the low-power-idle blob. */
	uint32_t lpm_idle_ocram_base =
		core_mmu_get_va(TRUSTZONE_OCRAM_START + LOWPOWER_IDLE_OCRAM_OFFSET,
				MEM_AREA_TEE_COHERENT);
	/* The pm_info struct lives at the very start of that region. */
	struct imx7_pm_info *p = (struct imx7_pm_info *)lpm_idle_ocram_base;

	pm_imx7_iram_tbl_init();

	/* Push all dirty L1 data out before handing memory to idle code. */
	dcache_op_level1(DCACHE_OP_CLEAN_INV);

	p->va_base = lpm_idle_ocram_base;
	p->pa_base = TRUSTZONE_OCRAM_START + LOWPOWER_IDLE_OCRAM_OFFSET;
	/* Physical entry point the wakeup path jumps back to. */
	p->tee_resume = (paddr_t)virt_to_phys((void *)(vaddr_t)v7_cpu_resume);
	p->pm_info_size = sizeof(*p);

	/* VA/PA pairs for each block the relocated idle code programs. */
	p->ddrc_va_base = core_mmu_get_va(DDRC_BASE, MEM_AREA_IO_SEC);
	p->ddrc_pa_base = DDRC_BASE;
	p->ccm_va_base = core_mmu_get_va(CCM_BASE, MEM_AREA_IO_SEC);
	p->ccm_pa_base = CCM_BASE;
	p->anatop_va_base = core_mmu_get_va(ANATOP_BASE, MEM_AREA_IO_SEC);
	p->anatop_pa_base = ANATOP_BASE;
	p->src_va_base = core_mmu_get_va(SRC_BASE, MEM_AREA_IO_SEC);
	p->src_pa_base = SRC_BASE;
	p->iomuxc_gpr_va_base = core_mmu_get_va(IOMUXC_GPR_BASE, MEM_AREA_IO_SEC);
	p->iomuxc_gpr_pa_base = IOMUXC_GPR_BASE;
	p->gpc_va_base = core_mmu_get_va(GPC_BASE, MEM_AREA_IO_SEC);
	p->gpc_pa_base = GPC_BASE;
	p->gic_va_base = core_mmu_get_va(GIC_BASE, MEM_AREA_IO_SEC);
	p->gic_pa_base = GIC_BASE;

	p->num_lpi_cpus = 0;
	p->num_online_cpus = -1;

	/* Copy the idle routine into OCRAM, right after the pm_info struct. */
	memcpy((void *)(lpm_idle_ocram_base + sizeof(*p)),
	       (void *)(vaddr_t)imx7d_low_power_idle,
	       LOWPOWER_IDLE_OCRAM_SIZE - sizeof(*p));

	/* Make the staged struct + code visible to non-cacheable masters. */
	dcache_clean_range((void *)lpm_idle_ocram_base, LOWPOWER_IDLE_OCRAM_SIZE);

	/*
	 * Note that IRAM IOSEC map, if changed to MEM map,
	 * need to flush cache
	 */
	icache_inv_all();

	return 0;
}
/*
 * flush_cache - make a just-loaded memory region executable.
 *
 * After an image has been copied into RAM, the data cache may still hold
 * the bytes and the instruction cache may hold stale contents for the
 * same addresses. Writing the dcache back and then invalidating the
 * icache over the range guarantees instruction fetch sees the new code.
 */
void flush_cache (unsigned long start_addr, unsigned long size)
{
	void *region = (void *)start_addr;

	/* Order matters: data must reach RAM before the icache refetches. */
	dcache_clean_range(region, size);
	icache_invalidate_range(region, size);
}
/*
 * _dcache_flush_range_for_net - write back the data cache lines covering
 * [startAddr, endAddr] so that the (presumably DMA-capable) network
 * hardware observes the data in RAM.
 *
 * NOTE(review): the "+ 1" in the length means endAddr is treated as the
 * address of the LAST byte of the range (inclusive bound) — confirm this
 * matches what callers pass before changing either side.
 */
static inline void _dcache_flush_range_for_net(unsigned startAddr,
					       unsigned endAddr)
{
	dcache_clean_range(startAddr, endAddr - startAddr + 1);
}