static void plat_save_el3_dormant_data() { struct _el3_dormant_data *p = &el3_dormant_data[0]; p->mp0_l2actlr_el1 = read_l2actlr(); p->mp0_l2ectlr_el1 = read_l2ectlr(); //backup L2RSTDISABLE and set as "not disable L2 reset" p->mp0_l2rstdisable = mmio_read_32(MP0_CA7L_CACHE_CONFIG); mmio_write_32(MP0_CA7L_CACHE_CONFIG, mmio_read_32(MP0_CA7L_CACHE_CONFIG) & ~L2RSTDISABLE); }
/*
 * Apply the workaround for ARM erratum 826319 (Prog Cat B Rare),
 * enabled via CONFIG_ARM_ERRATA_826319=y (for 6595/6752).
 *
 * The system might deadlock if a write cannot complete until read data
 * is accepted. Workaround: L2ACTLR[14] = 0, L2ACTLR[3] = 1. L2ACTLR
 * must be written before the MMU is on and before any ACE, CHI or ACP
 * traffic.
 *
 * Only the first CPU of each cluster programs L2ACTLR; other CPUs
 * return immediately. Always returns 0.
 */
int workaround_826319(unsigned long mpidr)
{
	unsigned long val;

	/* Apply only on the 1st CPU of each cluster. */
	if ((mpidr & MPIDR_CPU_MASK) != 0)
		return 0;

	val = read_l2actlr();
	val &= ~(1UL << 14);
	val |= (1UL << 3);
	write_l2actlr(val);

	return 0;
}
/******************************************************************************* * MTK_platform handler called when an affinity instance is about to be suspended. The * level and mpidr determine the affinity instance. The 'state' arg. allows the * platform to decide whether the cluster is being turned off and take apt * actions. * * CAUTION: This function is called with coherent stacks so that caches can be * turned off, flushed and coherency disabled. There is no guarantee that caches * will remain turned on across calls to this function as each affinity level is * dealt with. So do not write & read global variables across calls. It will be * wise to do flush a write to the global to prevent unpredictable results. ******************************************************************************/ int mt_affinst_suspend(unsigned long mpidr, unsigned long sec_entrypoint, unsigned long ns_entrypoint, unsigned int afflvl, unsigned int state) { int rc = PSCI_E_SUCCESS; unsigned int gicc_base, ectlr; unsigned long cpu_setup, cci_setup, linear_id; mailbox_t *mt_mboxes; switch (afflvl) { case MPIDR_AFFLVL2: if (state == PSCI_STATE_OFF) { struct _el3_dormant_data *p = &el3_dormant_data[0]; p->mp0_l2actlr_el1 = read_l2actlr(); p->mp0_l2ectlr_el1 = read_l2ectlr(); //backup L2RSTDISABLE and set as "not disable L2 reset" p->mp0_l2rstdisable = mmio_read_32(MP0_CA7L_CACHE_CONFIG); mmio_write_32(MP0_CA7L_CACHE_CONFIG, mmio_read_32(MP0_CA7L_CACHE_CONFIG) & ~L2RSTDISABLE); //backup generic timer //printf("[ATF_Suspend]read_cntpct_el0()=%lu\n", read_cntpct_el0()); generic_timer_backup(); gic_dist_save(); } break; case MPIDR_AFFLVL1: if (state == PSCI_STATE_OFF) { /* * Disable coherency if this cluster is to be * turned off */ cci_setup = mt_get_cfgvar(CONFIG_HAS_CCI); if (cci_setup) { cci_disable_coherency(mpidr); } disable_scu(mpidr); trace_power_flow(mpidr, CLUSTER_SUSPEND); } break; case MPIDR_AFFLVL0: if (state == PSCI_STATE_OFF) { //set cpu0 as aa64 for cpu reset 
mmio_write_32(MP0_MISC_CONFIG3, mmio_read_32(MP0_MISC_CONFIG3) | (1<<12)); /* * Take this cpu out of intra-cluster coherency if * the MTK_platform flavour supports the SMP bit. */ cpu_setup = mt_get_cfgvar(CONFIG_CPU_SETUP); if (cpu_setup) { ectlr = read_cpuectlr(); ectlr &= ~CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); } /* Program the jump address for the target cpu */ linear_id = platform_get_core_pos(mpidr); mt_mboxes = (mailbox_t *) (MBOX_OFF); mt_mboxes[linear_id].value = sec_entrypoint; flush_dcache_range((unsigned long) &mt_mboxes[linear_id], sizeof(unsigned long)); /* * Prevent interrupts from spuriously waking up * this cpu */ //gic_cpu_save(); gicc_base = mt_get_cfgvar(CONFIG_GICC_ADDR); gic_cpuif_deactivate(gicc_base); trace_power_flow(mpidr, CPU_SUSPEND); } break; default: assert(0); } return rc; }