/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` as an argument. The function also initializes the
 * power domain topology tree by querying the platform. The power domain nodes
 * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
 * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
 * exports its static topology map through the
 * plat_get_power_domain_tree_desc() API. populate_power_domain_tree() then
 * fills psci_non_cpu_pd_nodes[] and psci_cpu_pd_nodes[] iteratively from this
 * map. On a platform that implements two clusters of 2 CPUs each and supports
 * 3 power domain levels, the populated psci_non_cpu_pd_nodes[] would look
 * like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * The populated psci_cpu_pd_nodes[] would look like this:
 * <-  cpus cluster0  -><-  cpus cluster1  ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3   |
 * ------------------------------------------------
 ******************************************************************************/
int __init psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of the cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	(void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
				   &psci_plat_pm_ops);
	assert(psci_plat_pm_ops != NULL);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot, possibly before the data cache is enabled.
	 */
	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
				sizeof(psci_plat_pm_ops));

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off != NULL)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if ((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	    (psci_plat_pm_ops->pwr_domain_on_finish != NULL))
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	    (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state != NULL)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
	if ((psci_plat_pm_ops->read_mem_protect != NULL) &&
	    (psci_plat_pm_ops->write_mem_protect != NULL))
		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
	if (psci_plat_pm_ops->mem_protect_chk != NULL)
		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
	if (psci_plat_pm_ops->system_reset2 != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);

#if ENABLE_PSCI_STAT
	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif

	return 0;
}
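
/*
 * Illustrative sketch, not part of the PSCI library sources: for the example
 * in the comment above (one system node, two clusters of 2 CPUs each, 3 power
 * domain levels), the descriptor returned by plat_get_power_domain_tree_desc()
 * would, assuming the usual breadth-first "number of children per node"
 * convention, look roughly as follows. The array name and the function body
 * below are hypothetical platform code, guarded out so they cannot clash with
 * a real platform port.
 */
#if 0
static const unsigned char example_pd_tree_desc[] = {
	1,	/* One root node: the system power domain */
	2,	/* The system node has two children: cluster 0 and cluster 1 */
	2,	/* Cluster 0 contains two CPUs */
	2	/* Cluster 1 contains two CPUs */
};

const unsigned char *plat_get_power_domain_tree_desc(void)
{
	/* Hand the static topology map back to the PSCI setup code */
	return example_pd_tree_desc;
}
#endif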
/*******************************************************************************
 * This function initializes the power domain topology tree by querying the
 * platform. The power domain nodes higher than the CPU are populated in the
 * array psci_non_cpu_pd_nodes[] and the CPU power domains are populated in
 * psci_cpu_pd_nodes[]. The platform exports its static topology map through
 * the plat_get_power_domain_tree_desc() API. populate_power_domain_tree() then
 * fills psci_non_cpu_pd_nodes[] and psci_cpu_pd_nodes[] iteratively from this
 * map. On a platform that implements two clusters of 2 CPUs each and supports
 * 3 power domain levels, the populated psci_non_cpu_pd_nodes[] would look
 * like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * The populated psci_cpu_pd_nodes[] would look like this:
 * <-  cpus cluster0  -><-  cpus cluster1  ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3   |
 * ------------------------------------------------
 ******************************************************************************/
int psci_setup(void)
{
	const unsigned char *topology_tree;

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of the cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

#if !USE_COHERENT_MEM
	/*
	 * The psci_non_cpu_pd_nodes only needs flushing when it is not
	 * allocated in coherent memory.
	 */
	flush_dcache_range((uintptr_t) &psci_non_cpu_pd_nodes,
			   sizeof(psci_non_cpu_pd_nodes));
#endif

	flush_dcache_range((uintptr_t) &psci_cpu_pd_nodes,
			   sizeof(psci_cpu_pd_nodes));

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	plat_setup_psci_ops((uintptr_t)psci_entrypoint, &psci_plat_pm_ops);
	assert(psci_plat_pm_ops);

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if (psci_plat_pm_ops->pwr_domain_on &&
	    psci_plat_pm_ops->pwr_domain_on_finish)
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if (psci_plat_pm_ops->pwr_domain_suspend &&
	    psci_plat_pm_ops->pwr_domain_suspend_finish) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);

	return 0;
}
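
/*
 * Illustrative sketch, not part of the PSCI library sources: psci_caps above
 * is derived purely from which hooks the platform installs. A platform that
 * supports only CPU_ON, CPU_OFF and SYSTEM_RESET might wire up
 * plat_setup_psci_ops() roughly as below. The my_plat_* handler names and the
 * my_plat_sec_entrypoint variable are assumptions made for this example; only
 * the plat_psci_ops_t fields and the plat_setup_psci_ops() hook itself appear
 * in the code above. Guarded out so it cannot clash with a real platform port.
 */
#if 0
/* Hypothetical handlers, implemented elsewhere in the platform port */
static int my_plat_pwr_domain_on(u_register_t mpidr);
static void my_plat_pwr_domain_on_finish(const psci_power_state_t *target_state);
static void my_plat_pwr_domain_off(const psci_power_state_t *target_state);
static void __dead2 my_plat_system_reset(void);

static uintptr_t my_plat_sec_entrypoint;

static const plat_psci_ops_t my_plat_psci_ops = {
	.pwr_domain_on		= my_plat_pwr_domain_on,
	.pwr_domain_on_finish	= my_plat_pwr_domain_on_finish,
	.pwr_domain_off		= my_plat_pwr_domain_off,
	.system_reset		= my_plat_system_reset,
	/* Suspend hooks left NULL, so CPU_SUSPEND is not advertised in psci_caps */
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	/* Stash the warm boot entry point for the power-on path */
	my_plat_sec_entrypoint = sec_entrypoint;

	/* Expose the handler table; NULL entries disable the matching PSCI calls */
	*psci_ops = &my_plat_psci_ops;

	return 0;
}
#endif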