static void hikey_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	unsigned long mpidr;
	int cpu, cluster;

	mpidr = read_mpidr();
	cluster = MPIDR_AFFLVL1_VAL(mpidr);
	cpu = MPIDR_AFFLVL0_VAL(mpidr);

	/*
	 * Enable CCI coherency for this cluster.
	 * No need for locks as no other cpu is active at the moment.
	 */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));

	/* Zero the jump address in the mailbox for this cpu */
	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);

	/* Program the GIC per-cpu distributor or re-distributor interface */
	gicv2_pcpu_distif_init();

	/* Enable the GIC cpu interface */
	gicv2_cpuif_enable();
}
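/*
 * For reference: the MPIDR_AFFLVL0_VAL()/MPIDR_AFFLVL1_VAL() helpers used
 * throughout this code extract the core (Aff0) and cluster (Aff1) fields
 * from an MPIDR value. A minimal sketch of the TF-A definitions, written
 * from memory with plain constants, so treat exact spellings as
 * assumptions:
 */
#define MPIDR_AFFLVL_MASK	0xffull
#define MPIDR_AFF0_SHIFT	0
#define MPIDR_AFF1_SHIFT	8

#define MPIDR_AFFLVL0_VAL(mpidr) \
	(((mpidr) >> MPIDR_AFF0_SHIFT) & MPIDR_AFFLVL_MASK)
#define MPIDR_AFFLVL1_VAL(mpidr) \
	(((mpidr) >> MPIDR_AFF1_SHIFT) & MPIDR_AFFLVL_MASK)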
static void hikey_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	unsigned long mpidr;
	unsigned int cluster, cpu;

	/* Nothing to be done on waking up from retention from CPU level */
	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	/* Get the mpidr for this cpu */
	mpidr = read_mpidr_el1();
	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
	cpu = mpidr & MPIDR_CPU_MASK;

	/* Enable CCI coherency for cluster */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));

	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);

	if (SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		gicv2_distif_init();
		gicv2_pcpu_distif_init();
		gicv2_cpuif_enable();
	} else {
		gicv2_pcpu_distif_init();
		gicv2_cpuif_enable();
	}
}
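/*
 * For reference: CORE_PWR_STATE()/CLUSTER_PWR_STATE()/SYSTEM_PWR_STATE()
 * index into the composite target_state that PSCI hands to the platform
 * hooks. A minimal sketch matching the pattern used by the HiKey port
 * (illustrative; not a verbatim copy of the platform header):
 */
#define CORE_PWR_STATE(state) \
	((state)->pwr_domain_state[MPIDR_AFFLVL0])
#define CLUSTER_PWR_STATE(state) \
	((state)->pwr_domain_state[MPIDR_AFFLVL1])
#define SYSTEM_PWR_STATE(state) \
	((state)->pwr_domain_state[PLAT_MAX_PWR_LVL])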
void hikey960_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	clr_ex();
	isb();
	dsbsy();
	gicv2_cpuif_disable();

	hisi_clear_cpu_boot_flag(cluster, core);
	hisi_powerdn_core(cluster, core);

	/* Power down the cluster if all of its cores are now down */
	if (hisi_test_cpu_down(cluster, core)) {
		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));

		isb();
		dsbsy();

		hisi_powerdn_cluster(cluster, core);
	}
}
static int cores_pwr_domain_on(unsigned long mpidr, uint64_t entrypoint)
{
	uint32_t cpu, cluster;
	uint32_t cpuon_id;

	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	cluster = MPIDR_AFFLVL1_VAL(mpidr);

	/* Make sure the cpu is off before powering it up */
	cpus_id_power_domain(cluster, cpu, pmu_pd_off, CKECK_WFEI_MSK);

	cpuon_id = (cluster * PLATFORM_CLUSTER0_CORE_COUNT) + cpu;
	assert(cpuson_flags[cpuon_id] == 0);
	cpuson_flags[cpuon_id] = PMU_CPU_HOTPLUG;
	cpuson_entry_point[cpuon_id] = entrypoint;

	/* Switch boot addr to pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (PMUSRAM_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);
	dsb();

	cpus_id_power_domain(cluster, cpu, pmu_pd_on, CKECK_WFEI_MSK);

	mmio_write_32(SGRF_BASE + SGRF_SOC_CON(1 + cluster),
		      (COLD_BOOT_BASE >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	return 0;
}
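/*
 * Note on the SGRF writes above: Rockchip GRF/SGRF registers use the
 * upper 16 bits as a per-bit write-enable mask, so OR-ing in
 * CPU_BOOT_ADDR_WMASK lets a single write update the boot-address field
 * without a read-modify-write. A sketch of the constants this relies on
 * (values assumed from the rk3368 port, not verified against the header):
 */
#define CPU_BOOT_ADDR_ALIGN	16
#define CPU_BOOT_ADDR_WMASK	0xffff0000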
static void hikey_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	u_register_t mpidr = read_mpidr_el1();
	unsigned int cpu = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	if (CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		/* Program the jump address for the target cpu */
		hisi_pwrc_set_core_bx_addr(cpu, cluster, hikey_sec_entrypoint);

		gicv2_cpuif_disable();

		if (SYSTEM_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
			hisi_ipc_cpu_suspend(cpu, cluster);
	}

	/* Perform the common cluster specific operations */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);

		if (SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
			hisi_pwrc_set_cluster_wfi(1);
			hisi_pwrc_set_cluster_wfi(0);
			hisi_ipc_psci_system_off();
		} else {
			hisi_ipc_cluster_suspend(cpu, cluster);
		}
	}
}
static int hikey_pwr_domain_on(u_register_t mpidr)
{
	int cpu, cluster;
	int curr_cluster;

	cluster = MPIDR_AFFLVL1_VAL(mpidr);
	cpu = MPIDR_AFFLVL0_VAL(mpidr);
	curr_cluster = MPIDR_AFFLVL1_VAL(read_mpidr());

	if (cluster != curr_cluster)
		hisi_ipc_cluster_on(cpu, cluster);

	hisi_pwrc_set_core_bx_addr(cpu, cluster, hikey_sec_entrypoint);
	hisi_pwrc_enable_debug(cpu, cluster);
	hisi_ipc_cpu_on(cpu, cluster);

	return 0;
}
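/*
 * How these hooks get wired up: each platform fills a plat_psci_ops_t
 * and hands it back from plat_setup_psci_ops(), which also receives the
 * warm-boot entry point cached in hikey_sec_entrypoint above. A minimal
 * sketch for the HiKey port (field list abbreviated, so treat it as
 * illustrative rather than the full upstream table):
 */
static const plat_psci_ops_t hikey_psci_ops = {
	.pwr_domain_on			= hikey_pwr_domain_on,
	.pwr_domain_on_finish		= hikey_pwr_domain_on_finish,
	.pwr_domain_off			= hikey_pwr_domain_off,
	.pwr_domain_suspend		= hikey_pwr_domain_suspend,
	.pwr_domain_suspend_finish	= hikey_pwr_domain_suspend_finish,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	hikey_sec_entrypoint = sec_entrypoint;
	*psci_ops = &hikey_psci_ops;

	return 0;
}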
void plat_cci_enable(void)
{
	/*
	 * Enable CCI coherency for this cluster.
	 * No need for locks as no other cpu is active at the moment.
	 */
	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
}
static int sunxi_pwr_domain_on(u_register_t mpidr)
{
	if (mpidr_is_valid(mpidr) == 0)
		return PSCI_E_INTERN_FAIL;

	sunxi_cpu_on(MPIDR_AFFLVL1_VAL(mpidr), MPIDR_AFFLVL0_VAL(mpidr));

	return PSCI_E_SUCCESS;
}
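/*
 * mpidr_is_valid() is a range check on the affinity fields. A plausible
 * sketch, assuming PLATFORM_CLUSTER_COUNT and
 * PLATFORM_MAX_CPUS_PER_CLUSTER from the sunxi platform headers; the
 * exact helper body is an assumption:
 */
static int mpidr_is_valid(u_register_t mpidr)
{
	unsigned int cluster = MPIDR_AFFLVL1_VAL(mpidr);
	unsigned int core = MPIDR_AFFLVL0_VAL(mpidr);

	return (cluster < PLATFORM_CLUSTER_COUNT) &&
	       (core < PLATFORM_MAX_CPUS_PER_CLUSTER);
}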
static void __dead2 sunxi_pwr_down_wfi(const psci_power_state_t *target_state)
{
	u_register_t mpidr = read_mpidr();

	sunxi_cpu_off(MPIDR_AFFLVL1_VAL(mpidr), MPIDR_AFFLVL0_VAL(mpidr));

	while (1)
		wfi();
}
static void hikey960_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));

	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();
}
void hikey_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned long mpidr;
	int cpu, cluster;

	mpidr = read_mpidr();
	cluster = MPIDR_AFFLVL1_VAL(mpidr);
	cpu = MPIDR_AFFLVL0_VAL(mpidr);

	gicv2_cpuif_disable();
	hisi_ipc_cpu_off(cpu, cluster);

	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);

		hisi_ipc_cluster_off(cpu, cluster);
	}
}
/*******************************************************************************
 * Suspend the current CPU cluster
 ******************************************************************************/
void tegra_fc_cluster_idle(uint32_t mpidr)
{
	int cpu = mpidr & MPIDR_CPU_MASK;
	uint32_t val;

	VERBOSE("Entering cluster idle state...\n");

	tegra_fc_cc4_ctrl(cpu, 0);

	/* hardware L2 flush is faster for A53 only */
	tegra_fc_write_32(FLOWCTRL_L2_FLUSH_CONTROL,
			  !!MPIDR_AFFLVL1_VAL(mpidr));

	/* suspend the CPU cluster */
	val = FLOWCTRL_PG_CPU_NONCPU << FLOWCTRL_ENABLE_EXT;
	tegra_fc_prepare_suspend(cpu, val);
}
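/*
 * tegra_fc_write_32() above is a thin MMIO accessor over the flow
 * controller block. A minimal sketch, assuming TEGRA_FLOWCTRL_BASE from
 * the Tegra memory map headers (the accessor signatures are assumptions):
 */
static inline uint32_t tegra_fc_read_32(uint32_t off)
{
	return mmio_read_32(TEGRA_FLOWCTRL_BASE + off);
}

static inline void tegra_fc_write_32(uint32_t off, uint32_t val)
{
	mmio_write_32(TEGRA_FLOWCTRL_BASE + off, val);
}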
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}
void bl31_early_platform_setup(bl31_params_t *from_bl2,
			       void *plat_params_from_bl2)
{
	arm_bl31_early_platform_setup(from_bl2, plat_params_from_bl2);

	/*
	 * Initialize CCI for this cluster during cold boot.
	 * No need for locks as no other CPU is active.
	 */
	arm_cci_init();

	/*
	 * Enable CCI coherency for the primary CPU's cluster.
	 * Earlier bootloader stages might already do this (e.g. Trusted
	 * Firmware's BL1 does it) but we can't assume so. There is no harm
	 * in executing this code twice anyway.
	 * Platform specific PSCI code will enable coherency for other
	 * clusters.
	 */
	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
}
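/*
 * arm_cci_init() maps each cluster to its CCI slave interface index
 * before any coherency is enabled. A minimal sketch following the
 * plat/arm/common pattern; the PLAT_ARM_CCI_* macro names are
 * assumptions based on that convention:
 */
static const int cci_map[] = {
	PLAT_ARM_CCI_CLUSTER0_SL_IFACE_IX,
	PLAT_ARM_CCI_CLUSTER1_SL_IFACE_IX
};

void arm_cci_init(void)
{
	cci_init(PLAT_ARM_CCI_BASE, cci_map, ARRAY_SIZE(cci_map));
}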
static void __uniphier_cci_disable(void)
{
	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
}
void plat_cci_disable(void)
{
#ifdef PLAT_RK_CCI_BASE
	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
#endif
}
void plat_cci_disable(void)
{
	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr()));
}
/******************************************************************************
 * Helper function to remove current master from coherency
 *****************************************************************************/
void plat_arm_interconnect_exit_coherency(void)
{
	cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
}
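/*
 * For symmetry: the matching helper that brings the current master back
 * into coherency is the mirror image of the function above (a sketch
 * following the same plat/arm/common pattern):
 */
void plat_arm_interconnect_enter_coherency(void)
{
	cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));
}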