/******************************************************************************* * MTK_platform handler called when an affinity instance is about to be turned off. The * level and mpidr determine the affinity instance. The 'state' arg. allows the * platform to decide whether the cluster is being turned off and take apt * actions. * * CAUTION: This function is called with coherent stacks so that caches can be * turned off, flushed and coherency disabled. There is no guarantee that caches * will remain turned on across calls to this function as each affinity level is * dealt with. So do not write & read global variables across calls. It will be * wise to do flush a write to the global to prevent unpredictable results. ******************************************************************************/ int mt_affinst_off(unsigned long mpidr, unsigned int afflvl, unsigned int state) { int rc = PSCI_E_SUCCESS; unsigned int gicc_base, ectlr; unsigned long cpu_setup, cci_setup; switch (afflvl) { case MPIDR_AFFLVL3: case MPIDR_AFFLVL2: break; case MPIDR_AFFLVL1: if (state == PSCI_STATE_OFF) { /* * Disable coherency if this cluster is to be * turned off */ cci_setup = mt_get_cfgvar(CONFIG_HAS_CCI); if (cci_setup) { cci_disable_coherency(mpidr); } disable_scu(mpidr); trace_power_flow(mpidr, CLUSTER_DOWN); } break; case MPIDR_AFFLVL0: if (state == PSCI_STATE_OFF) { /* * Take this cpu out of intra-cluster coherency if * the MTK_platform flavour supports the SMP bit. */ cpu_setup = mt_get_cfgvar(CONFIG_CPU_SETUP); if (cpu_setup) { ectlr = read_cpuectlr(); ectlr &= ~CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); } /* * Prevent interrupts from spuriously waking up * this cpu */ //gic_cpu_save(); gicc_base = mt_get_cfgvar(CONFIG_GICC_ADDR); gic_cpuif_deactivate(gicc_base); trace_power_flow(mpidr, CPU_DOWN); } break; default: assert(0); } return rc; }
/******************************************************************************* * FVP handler called when an affinity instance is about to be turned off. The * level and mpidr determine the affinity instance. The 'state' arg. allows the * platform to decide whether the cluster is being turned off and take apt * actions. * * CAUTION: There is no guarantee that caches will remain turned on across calls * to this function as each affinity level is dealt with. So do not write & read * global variables across calls. It will be wise to do flush a write to the * global to prevent unpredictable results. ******************************************************************************/ int plat_affinst_off(unsigned long mpidr, unsigned int afflvl, unsigned int state) { /* Determine if any platform actions need to be executed */ if (plat_do_plat_actions(afflvl, state) == -EAGAIN) return PSCI_E_SUCCESS; unsigned int gicc_base, ectlr; /* * If execution reaches this stage then this affinity level will be * suspended. Perform at least the cpu specific actions followed the * cluster specific operations if applicable. */ // plat_cpu_pwrdwn_common(); /* * Take this cpu out of intra-cluster coherency if * the MTK_platform flavour supports the SMP bit. */ ectlr = read_cpuectlr(); ectlr &= ~CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); /* * Prevent interrupts from spuriously waking up * this cpu */ gicc_base = get_plat_config()->gicc_base; gic_cpuif_deactivate(gicc_base); /* * Perform cluster power down */ if (afflvl != MPIDR_AFFLVL0) { // plat_cluster_pwrdwn_common(); /* * Disable coherency if this cluster is to be * turned off */ if (get_plat_config()->flags & CONFIG_HAS_CCI) { cci_disable_cluster_coherency(mpidr); } disable_scu(mpidr); } return PSCI_E_SUCCESS; }
/******************************************************************************* * FVP handler called when an affinity instance is about to be suspended. The * level and mpidr determine the affinity instance. The 'state' arg. allows the * platform to decide whether the cluster is being turned off and take apt * actions. * * CAUTION: There is no guarantee that caches will remain turned on across calls * to this function as each affinity level is dealt with. So do not write & read * global variables across calls. It will be wise to do flush a write to the * global to prevent unpredictable results. ******************************************************************************/ int plat_affinst_suspend(unsigned long mpidr, unsigned long sec_entrypoint, unsigned long ns_entrypoint, unsigned int afflvl, unsigned int state) { unsigned int ectlr; /* Determine if any platform actions need to be executed. */ if (plat_do_plat_actions(afflvl, state) == -EAGAIN) return PSCI_E_SUCCESS; //set cpu0 as aa64 for cpu reset mmio_write_32(MP0_MISC_CONFIG3, mmio_read_32(MP0_MISC_CONFIG3) | (1<<12)); ectlr = read_cpuectlr(); ectlr &= ~CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); /* Program the jump address for the target cpu */ plat_program_mailbox(read_mpidr_el1(), sec_entrypoint); /* Program the power controller to enable wakeup interrupts. */ // plat_pwrc_set_wen(mpidr); /* Perform the common cpu specific operations */ // plat_cpu_pwrdwn_common(); gic_cpuif_deactivate(get_plat_config()->gicc_base); /* Perform the common cluster specific operations */ if (afflvl >= MPIDR_AFFLVL1) { // plat_cluster_pwrdwn_common(); if (get_plat_config()->flags & CONFIG_HAS_CCI) cci_disable_cluster_coherency(mpidr); disable_scu(mpidr); } if (afflvl >= MPIDR_AFFLVL2) { plat_save_el3_dormant_data(); generic_timer_backup(); gic_dist_save(); } return PSCI_E_SUCCESS; }
/*******************************************************************************
 * MTK platform handler called when an affinity instance has just been powered
 * on after being turned off earlier. The level and mpidr determine the
 * affinity instance. The 'state' arg. allows the platform to decide whether
 * the cluster was turned off prior to wakeup and do what's necessary to set
 * it up correctly.
 *
 * Returns PSCI_E_SUCCESS (asserts on an unknown affinity level).
 ******************************************************************************/
int mt_affinst_on_finish(unsigned long mpidr, unsigned int afflvl, unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned long linear_id, cpu_setup;
	mailbox_t *mt_mboxes;
	unsigned int gicd_base, gicc_base, ectlr;

	switch (afflvl) {
	case MPIDR_AFFLVL2:
		if (state == PSCI_STATE_OFF) {
			/* Debug hook: spin here to allow attaching after system resume. */
			// __asm__ __volatile__ ("1: b 1b \n\t");
		}
		/* Re-do the per-cpu GIC distributor setup after the system level was off. */
		gicd_base = mt_get_cfgvar(CONFIG_GICD_ADDR);
		gic_pcpu_distif_setup(gicd_base);
		break;

	case MPIDR_AFFLVL1:
		/* Re-enable cluster coherency only if the cluster was actually off. */
		if (state == PSCI_STATE_OFF) {
			enable_scu(mpidr);
			mt_cci_setup();
			trace_power_flow(mpidr, CLUSTER_UP);
		}
		break;

	case MPIDR_AFFLVL0:
		/*
		 * Ignore the state passed for a cpu. It could only have
		 * been off if we are here.
		 */
		/* Apply the ARM erratum 836870 workaround on this cpu. */
		workaround_836870(mpidr);

		/*
		 * Turn on intra-cluster coherency if the MTK platform flavour
		 * supports the CPUECTLR SMP bit.
		 */
		cpu_setup = mt_get_cfgvar(CONFIG_CPU_SETUP);
		if (cpu_setup) {
			ectlr = read_cpuectlr();
			ectlr |= CPUECTLR_SMP_BIT;
			write_cpuectlr(ectlr);
		}

		/* Zero the jump address in the mailbox for this cpu and flush it
		 * so the next warm boot does not see a stale entry point. */
		mt_mboxes = (mailbox_t *) (MBOX_OFF);
		linear_id = platform_get_core_pos(mpidr);
		mt_mboxes[linear_id].value = 0;
		flush_dcache_range((unsigned long) &mt_mboxes[linear_id],
				   sizeof(unsigned long));

		gicc_base = mt_get_cfgvar(CONFIG_GICC_ADDR);
		/* Enable the gic cpu interface */
		gic_cpuif_setup(gicc_base);
		//gic_cpu_restore();

#if 0 /* FIXME: system counter access setup disabled — reg_val is not declared
       * in this function; re-introduce a local when enabling this block. */
		/* Allow access to the System counter timer module */
		reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT);
		reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT);
		reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT);
		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val);
		mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val);
		reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1));
		mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val);
#endif

		/* Let the non-secure world read/write CPUECTLR. */
		enable_ns_access_to_cpuectlr();

		trace_power_flow(mpidr, CPU_UP);
		break;

	default:
		assert(0);
	}

	return rc;
}
/*******************************************************************************
 * MTK platform handler called when an affinity instance is about to be
 * suspended. The level and mpidr determine the affinity instance. The 'state'
 * arg. allows the platform to decide whether the cluster is being turned off
 * and take apt actions.
 *
 * CAUTION: This function is called with coherent stacks so that caches can be
 * turned off, flushed and coherency disabled. There is no guarantee that
 * caches will remain turned on across calls to this function as each affinity
 * level is dealt with. So do not write & read global variables across calls.
 * It will be wise to flush a write to a global to prevent unpredictable
 * results.
 *
 * sec_entrypoint is the warm-boot jump address published for this cpu;
 * ns_entrypoint is unused here. Returns PSCI_E_SUCCESS (asserts on an unknown
 * affinity level).
 ******************************************************************************/
int mt_affinst_suspend(unsigned long mpidr, unsigned long sec_entrypoint, unsigned long ns_entrypoint, unsigned int afflvl, unsigned int state)
{
	int rc = PSCI_E_SUCCESS;
	unsigned int gicc_base, ectlr;
	unsigned long cpu_setup, cci_setup, linear_id;
	mailbox_t *mt_mboxes;

	switch (afflvl) {
	case MPIDR_AFFLVL2:
		if (state == PSCI_STATE_OFF) {
			/* Save L2 control state so it can be restored on resume. */
			struct _el3_dormant_data *p = &el3_dormant_data[0];

			p->mp0_l2actlr_el1 = read_l2actlr();
			p->mp0_l2ectlr_el1 = read_l2ectlr();

			/* Back up L2RSTDISABLE, then clear it so L2 reset is
			 * NOT disabled across the dormant cycle. */
			p->mp0_l2rstdisable = mmio_read_32(MP0_CA7L_CACHE_CONFIG);
			mmio_write_32(MP0_CA7L_CACHE_CONFIG,
				      mmio_read_32(MP0_CA7L_CACHE_CONFIG) & ~L2RSTDISABLE);

			/* Back up the generic timer and GIC distributor state. */
			//printf("[ATF_Suspend]read_cntpct_el0()=%lu\n", read_cntpct_el0());
			generic_timer_backup();
			gic_dist_save();
		}
		break;

	case MPIDR_AFFLVL1:
		if (state == PSCI_STATE_OFF) {
			/*
			 * Disable coherency if this cluster is to be
			 * turned off
			 */
			cci_setup = mt_get_cfgvar(CONFIG_HAS_CCI);
			if (cci_setup) {
				cci_disable_coherency(mpidr);
			}
			disable_scu(mpidr);
			trace_power_flow(mpidr, CLUSTER_SUSPEND);
		}
		break;

	case MPIDR_AFFLVL0:
		if (state == PSCI_STATE_OFF) {
			/* Mark cpu0 to come back in AArch64 on the next reset (bit 12). */
			mmio_write_32(MP0_MISC_CONFIG3,
				      mmio_read_32(MP0_MISC_CONFIG3) | (1<<12));

			/*
			 * Take this cpu out of intra-cluster coherency if
			 * the MTK platform flavour supports the SMP bit.
			 */
			cpu_setup = mt_get_cfgvar(CONFIG_CPU_SETUP);
			if (cpu_setup) {
				ectlr = read_cpuectlr();
				ectlr &= ~CPUECTLR_SMP_BIT;
				write_cpuectlr(ectlr);
			}

			/* Program the jump address for the target cpu and
			 * flush it so it is visible with caches off. */
			linear_id = platform_get_core_pos(mpidr);
			mt_mboxes = (mailbox_t *) (MBOX_OFF);
			mt_mboxes[linear_id].value = sec_entrypoint;
			flush_dcache_range((unsigned long) &mt_mboxes[linear_id],
					   sizeof(unsigned long));

			/*
			 * Prevent interrupts from spuriously waking up
			 * this cpu
			 */
			//gic_cpu_save();
			gicc_base = mt_get_cfgvar(CONFIG_GICC_ADDR);
			gic_cpuif_deactivate(gicc_base);
			trace_power_flow(mpidr, CPU_SUSPEND);
		}
		break;

	default:
		assert(0);
	}

	return rc;
}
/******************************************************************************* * FVP handler called when an affinity instance has just been powered on after * being turned off earlier. The level and mpidr determine the affinity * instance. The 'state' arg. allows the platform to decide whether the cluster * was turned off prior to wakeup and do what's necessary to setup it up * correctly. ******************************************************************************/ int fvp_affinst_on_finish(unsigned long mpidr, unsigned int afflvl, unsigned int state) { int rc = PSCI_E_SUCCESS; unsigned long linear_id, cpu_setup, cci_setup; mailbox *fvp_mboxes; unsigned int gicd_base, gicc_base, reg_val, ectlr; switch (afflvl) { case MPIDR_AFFLVL1: /* Enable coherency if this cluster was off */ if (state == PSCI_STATE_OFF) { /* * This CPU might have woken up whilst the * cluster was attempting to power down. In * this case the FVP power controller will * have a pending cluster power off request * which needs to be cleared by writing to the * PPONR register. This prevents the power * controller from interpreting a subsequent * entry of this cpu into a simple wfi as a * power down request. */ fvp_pwrc_write_pponr(mpidr); cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); if (cci_setup) { cci_enable_coherency(mpidr); } } break; case MPIDR_AFFLVL0: /* * Ignore the state passed for a cpu. It could only have * been off if we are here. */ /* * Turn on intra-cluster coherency if the FVP flavour supports * it. 
*/ cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); if (cpu_setup) { ectlr = read_cpuectlr(); ectlr |= CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); } /* * Clear PWKUPR.WEN bit to ensure interrupts do not interfere * with a cpu power down unless the bit is set again */ fvp_pwrc_clr_wen(mpidr); /* Zero the jump address in the mailbox for this cpu */ fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); linear_id = platform_get_core_pos(mpidr); fvp_mboxes[linear_id].value = 0; flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], sizeof(unsigned long)); gicd_base = platform_get_cfgvar(CONFIG_GICD_ADDR); gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); /* Enable the gic cpu interface */ gic_cpuif_setup(gicc_base); /* TODO: This setup is needed only after a cold boot */ gic_pcpu_distif_setup(gicd_base); /* Allow access to the System counter timer module */ reg_val = (1 << CNTACR_RPCT_SHIFT) | (1 << CNTACR_RVCT_SHIFT); reg_val |= (1 << CNTACR_RFRQ_SHIFT) | (1 << CNTACR_RVOFF_SHIFT); reg_val |= (1 << CNTACR_RWVT_SHIFT) | (1 << CNTACR_RWPT_SHIFT); mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(0), reg_val); mmio_write_32(SYS_TIMCTL_BASE + CNTACR_BASE(1), reg_val); reg_val = (1 << CNTNSAR_NS_SHIFT(0)) | (1 << CNTNSAR_NS_SHIFT(1)); mmio_write_32(SYS_TIMCTL_BASE + CNTNSAR, reg_val); break; default: assert(0); } return rc; }
/******************************************************************************* * FVP handler called when an affinity instance is about to be suspended. The * level and mpidr determine the affinity instance. The 'state' arg. allows the * platform to decide whether the cluster is being turned off and take apt * actions. * * CAUTION: This function is called with coherent stacks so that caches can be * turned off, flushed and coherency disabled. There is no guarantee that caches * will remain turned on across calls to this function as each affinity level is * dealt with. So do not write & read global variables across calls. It will be * wise to do flush a write to the global to prevent unpredictable results. ******************************************************************************/ int fvp_affinst_suspend(unsigned long mpidr, unsigned long sec_entrypoint, unsigned long ns_entrypoint, unsigned int afflvl, unsigned int state) { int rc = PSCI_E_SUCCESS; unsigned int gicc_base, ectlr; unsigned long cpu_setup, cci_setup, linear_id; mailbox *fvp_mboxes; switch (afflvl) { case MPIDR_AFFLVL1: if (state == PSCI_STATE_OFF) { /* * Disable coherency if this cluster is to be * turned off */ cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); if (cci_setup) { cci_disable_coherency(mpidr); } /* * Program the power controller to turn the * cluster off */ fvp_pwrc_write_pcoffr(mpidr); } break; case MPIDR_AFFLVL0: if (state == PSCI_STATE_OFF) { /* * Take this cpu out of intra-cluster coherency if * the FVP flavour supports the SMP bit. 
*/ cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); if (cpu_setup) { ectlr = read_cpuectlr(); ectlr &= ~CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); } /* Program the jump address for the target cpu */ linear_id = platform_get_core_pos(mpidr); fvp_mboxes = (mailbox *) (TZDRAM_BASE + MBOX_OFF); fvp_mboxes[linear_id].value = sec_entrypoint; flush_dcache_range((unsigned long) &fvp_mboxes[linear_id], sizeof(unsigned long)); /* * Prevent interrupts from spuriously waking up * this cpu */ gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); gic_cpuif_deactivate(gicc_base); /* * Program the power controller to power this * cpu off and enable wakeup interrupts. */ fvp_pwrc_set_wen(mpidr); fvp_pwrc_write_ppoffr(mpidr); } break; default: assert(0); } return rc; }
/******************************************************************************* * FVP handler called when an affinity instance is about to be turned off. The * level and mpidr determine the affinity instance. The 'state' arg. allows the * platform to decide whether the cluster is being turned off and take apt * actions. * * CAUTION: This function is called with coherent stacks so that caches can be * turned off, flushed and coherency disabled. There is no guarantee that caches * will remain turned on across calls to this function as each affinity level is * dealt with. So do not write & read global variables across calls. It will be * wise to do flush a write to the global to prevent unpredictable results. ******************************************************************************/ int fvp_affinst_off(unsigned long mpidr, unsigned int afflvl, unsigned int state) { int rc = PSCI_E_SUCCESS; unsigned int gicc_base, ectlr; unsigned long cpu_setup, cci_setup; switch (afflvl) { case MPIDR_AFFLVL1: if (state == PSCI_STATE_OFF) { /* * Disable coherency if this cluster is to be * turned off */ cci_setup = platform_get_cfgvar(CONFIG_HAS_CCI); if (cci_setup) { cci_disable_coherency(mpidr); } /* * Program the power controller to turn the * cluster off */ fvp_pwrc_write_pcoffr(mpidr); } break; case MPIDR_AFFLVL0: if (state == PSCI_STATE_OFF) { /* * Take this cpu out of intra-cluster coherency if * the FVP flavour supports the SMP bit. */ cpu_setup = platform_get_cfgvar(CONFIG_CPU_SETUP); if (cpu_setup) { ectlr = read_cpuectlr(); ectlr &= ~CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); } /* * Prevent interrupts from spuriously waking up * this cpu */ gicc_base = platform_get_cfgvar(CONFIG_GICC_ADDR); gic_cpuif_deactivate(gicc_base); /* * Program the power controller to power this * cpu off */ fvp_pwrc_write_ppoffr(mpidr); } break; default: assert(0); } return rc; }
/******************************************************************************* * FVP handler called when an affinity instance has just been powered on after * being turned off earlier. The level and mpidr determine the affinity * instance. The 'state' arg. allows the platform to decide whether the cluster * was turned off prior to wakeup and do what's necessary to setup it up * correctly. ******************************************************************************/ int plat_affinst_on_finish(unsigned long mpidr, unsigned int afflvl, unsigned int state) { int rc = PSCI_E_SUCCESS; unsigned ectlr; /* Determine if any platform actions need to be executed. */ if (plat_do_plat_actions(afflvl, state) == -EAGAIN) return PSCI_E_SUCCESS; /* Perform the common cluster specific operations */ if (afflvl >= MPIDR_AFFLVL1) { /* * This CPU might have woken up whilst the cluster was * attempting to power down. In this case the FVP power * controller will have a pending cluster power off request * which needs to be cleared by writing to the PPONR register. * This prevents the power controller from interpreting a * subsequent entry of this cpu into a simple wfi as a power * down request. */ // plat_pwrc_write_pponr(mpidr); enable_scu(mpidr); /* Enable coherency if this cluster was off */ plat_cci_enable(); } /* * Ignore the state passed for a cpu. It could only have * been off if we are here. */ workaround_836870(mpidr); /* * Turn on intra-cluster coherency if the MTK_platform flavour supports * it. 
*/ ectlr = read_cpuectlr(); ectlr |= CPUECTLR_SMP_BIT; write_cpuectlr(ectlr); /* * Clear PWKUPR.WEN bit to ensure interrupts do not interfere * with a cpu power down unless the bit is set again */ // plat_pwrc_clr_wen(mpidr); /* Zero the jump address in the mailbox for this cpu */ plat_program_mailbox(read_mpidr_el1(), 0); /* Enable the gic cpu interface */ // arm_gic_cpuif_setup(); gic_cpuif_setup(get_plat_config()->gicc_base); gic_pcpu_distif_setup(get_plat_config()->gicd_base); /* TODO: This setup is needed only after a cold boot */ // arm_gic_pcpu_distif_setup(); enable_ns_access_to_cpuectlr(); return rc; }