/*******************************************************************************
 * Generic handler invoked when a CPU is physically powered on. It traverses
 * the node information, finds the highest power level that was powered off
 * and performs the generic, architectural and platform-specific setup and
 * state management needed to power on that level and all levels below it.
 * For example, for a CPU that has just been powered on it calls the platform
 * specific code to enable the GIC CPU interface, and for a cluster it
 * additionally enables coherency at the interconnect level (see the
 * illustrative platform sketch after this function).
 ******************************************************************************/
void psci_power_up_finish(void)
{
	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, a consistent snapshot of the
	 * system topology is held and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these 2 cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architectural
	 * and platform-specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}
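
/*
 * Illustrative sketch only, not part of the generic PSCI code: a hypothetical
 * platform's pwr_domain_on_finish handler, of the kind psci_cpu_on_finish()
 * invokes through psci_plat_pm_ops. The helpers example_gic_cpuif_enable()
 * and example_interconnect_enable_coherency() are placeholder stubs standing
 * in for the platform's real GIC and interconnect drivers, and power level 1
 * is assumed to map to the cluster on this hypothetical platform.
 */
static void example_gic_cpuif_enable(void)
{
	/* Stub: a real platform would programme and enable its GIC CPU
	 * interface here. */
}

static void example_interconnect_enable_coherency(u_register_t mpidr)
{
	/* Stub: a real platform would enable coherency for this CPU's
	 * cluster at the interconnect here. */
	(void)mpidr;
}

static void example_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	/* If the whole cluster was powered down, restore interconnect
	 * coherency before doing the per-CPU setup. */
	if (target_state->pwr_domain_state[MPIDR_AFFLVL1] == PLAT_MAX_OFF_STATE)
		example_interconnect_enable_coherency(read_mpidr());

	/* Per-CPU setup: enable the GIC CPU interface. */
	example_gic_cpuif_enable();
}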
/*******************************************************************************
 * This function does the architectural setup and takes the warm boot
 * entry-point `mailbox_ep` as an argument. The function also initializes the
 * power domain topology tree by querying the platform. The power domain nodes
 * higher than the CPU are populated in the array psci_non_cpu_pd_nodes[] and
 * the CPU power domains are populated in psci_cpu_pd_nodes[]. The platform
 * exports its static topology map through the
 * plat_get_power_domain_tree_desc() API; populate_power_domain_tree() then
 * walks this map and fills in psci_non_cpu_pd_nodes and psci_cpu_pd_nodes
 * iteratively (an illustrative platform sketch follows this function). On a
 * platform that implements two clusters of 2 CPUs each and supports 3 power
 * domain levels, the populated psci_non_cpu_pd_nodes would
 * look like this:
 *
 * ---------------------------------------------------
 * | system node | cluster 0 node  | cluster 1 node  |
 * ---------------------------------------------------
 *
 * And populated psci_cpu_pd_nodes would look like this :
 * <-    cpus cluster0   -><-   cpus cluster1   ->
 * ------------------------------------------------
 * |   CPU 0   |   CPU 1   |   CPU 2   |   CPU 3  |
 * ------------------------------------------------
 ******************************************************************************/
int __init psci_setup(const psci_lib_args_t *lib_args)
{
	const unsigned char *topology_tree;

	assert(VERIFY_PSCI_LIB_ARGS_V1(lib_args));

	/* Do the Architectural initialization */
	psci_arch_setup();

	/* Query the topology map from the platform */
	topology_tree = plat_get_power_domain_tree_desc();

	/* Populate the power domain arrays using the platform topology map */
	populate_power_domain_tree(topology_tree);

	/* Update the CPU limits for each node in psci_non_cpu_pd_nodes */
	psci_update_pwrlvl_limits();

	/* Populate the mpidr field of cpu node for this CPU */
	psci_cpu_pd_nodes[plat_my_core_pos()].mpidr =
		read_mpidr() & MPIDR_AFFINITY_MASK;

	psci_init_req_local_pwr_states();

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domain levels for this CPU to run.
	 */
	psci_set_pwr_domains_to_run(PLAT_MAX_PWR_LVL);

	(void) plat_setup_psci_ops((uintptr_t)lib_args->mailbox_ep,
				   &psci_plat_pm_ops);
	assert(psci_plat_pm_ops != NULL);

	/*
	 * Flush `psci_plat_pm_ops` as it will be accessed by secondary CPUs
	 * during warm boot, possibly before data cache is enabled.
	 */
	psci_flush_dcache_range((uintptr_t)&psci_plat_pm_ops,
					sizeof(psci_plat_pm_ops));

	/* Initialize the psci capability */
	psci_caps = PSCI_GENERIC_CAP;

	if (psci_plat_pm_ops->pwr_domain_off != NULL)
		psci_caps |= define_psci_cap(PSCI_CPU_OFF);
	if ((psci_plat_pm_ops->pwr_domain_on != NULL) &&
	    (psci_plat_pm_ops->pwr_domain_on_finish != NULL))
		psci_caps |= define_psci_cap(PSCI_CPU_ON_AARCH64);
	if ((psci_plat_pm_ops->pwr_domain_suspend != NULL) &&
	    (psci_plat_pm_ops->pwr_domain_suspend_finish != NULL)) {
		psci_caps |= define_psci_cap(PSCI_CPU_SUSPEND_AARCH64);
		if (psci_plat_pm_ops->get_sys_suspend_power_state != NULL)
			psci_caps |= define_psci_cap(PSCI_SYSTEM_SUSPEND_AARCH64);
	}
	if (psci_plat_pm_ops->system_off != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_OFF);
	if (psci_plat_pm_ops->system_reset != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET);
	if (psci_plat_pm_ops->get_node_hw_state != NULL)
		psci_caps |= define_psci_cap(PSCI_NODE_HW_STATE_AARCH64);
	if ((psci_plat_pm_ops->read_mem_protect != NULL) &&
	    (psci_plat_pm_ops->write_mem_protect != NULL))
		psci_caps |= define_psci_cap(PSCI_MEM_PROTECT);
	if (psci_plat_pm_ops->mem_protect_chk != NULL)
		psci_caps |= define_psci_cap(PSCI_MEM_CHK_RANGE_AARCH64);
	if (psci_plat_pm_ops->system_reset2 != NULL)
		psci_caps |= define_psci_cap(PSCI_SYSTEM_RESET2_AARCH64);

#if ENABLE_PSCI_STAT
	psci_caps |= define_psci_cap(PSCI_STAT_RESIDENCY_AARCH64);
	psci_caps |= define_psci_cap(PSCI_STAT_COUNT_AARCH64);
#endif
#endif

	return 0;
}
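
/*
 * Illustrative sketch only, not part of the generic PSCI code: what a
 * hypothetical platform port might provide for the example topology in the
 * comment above psci_setup() - one system node containing two clusters of
 * two CPUs each. The descriptor lists, level by level, how many children
 * each node has; all names below are placeholders, and a real port would
 * hook in its actual drivers and mailbox handling.
 */
static const unsigned char example_power_domain_tree_desc[] = {
	1,	/* One root (system-level) power domain */
	2,	/* The system node has two cluster children */
	2,	/* Cluster 0 has two CPU children */
	2	/* Cluster 1 has two CPU children */
};

const unsigned char *plat_get_power_domain_tree_desc(void)
{
	return example_power_domain_tree_desc;
}

/* Hypothetical warm boot entry point remembered for secondary CPUs */
static uintptr_t example_sec_entrypoint;

static int example_pwr_domain_on(u_register_t mpidr)
{
	/* Stub: a real platform would release the target CPU from reset and
	 * point it at example_sec_entrypoint via its mailbox or hold pen. */
	(void)mpidr;
	return PSCI_E_SUCCESS;
}

static const plat_psci_ops_t example_plat_psci_ops = {
	.pwr_domain_on		= example_pwr_domain_on,
	.pwr_domain_on_finish	= example_pwr_domain_on_finish,
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	/* Remember where powered-on CPUs must enter the firmware */
	example_sec_entrypoint = sec_entrypoint;

	/* Hand the static ops table back to the generic PSCI code. With only
	 * pwr_domain_on/on_finish populated, psci_setup() above would
	 * advertise PSCI_CPU_ON_AARCH64 on top of PSCI_GENERIC_CAP. */
	*psci_ops = &example_plat_psci_ops;

	return 0;
}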