/*******************************************************************************
 * Generic handler which is called when a cpu is physically powered on. It
 * traverses the node information and finds the highest power level powered
 * off, then performs the generic, architectural and platform specific setup
 * and state management needed to power on that power level and the power
 * levels below it.
 * e.g. For a cpu that's been powered on, it will call the platform specific
 * code to enable the gic cpu interface and for a cluster it will enable
 * coherency at the interconnect level in addition to the gic cpu interface.
 ******************************************************************************/
void psci_power_up_finish(void)
{
	unsigned int end_pwrlvl, cpu_idx = plat_my_core_pos();
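	/* Local power states default to RUN; targets are retrieved below */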
	psci_power_state_t state_info = { {PSCI_LOCAL_STATE_RUN} };

	/*
	 * Verify that we have been explicitly turned ON or resumed from
	 * suspend.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_OFF) {
		ERROR("Unexpected affinity info state");
		panic();
	}

	/*
	 * Get the maximum power domain level to traverse to after this cpu
	 * has been physically powered up.
	 */
	end_pwrlvl = get_power_on_target_pwrlvl();

	/*
	 * This function acquires the lock corresponding to each power level so
	 * that by the time all locks are taken, a snapshot of the system
	 * topology has been captured and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);

	/*
	 * Find the local state each power domain, up to end_pwrlvl, has
	 * emerged from; the ON/RESUME finishers below act on this state.
	 */
	psci_get_target_local_pwr_states(end_pwrlvl, &state_info);

	/*
	 * This CPU could be resuming from suspend or it could have just been
	 * turned on. To distinguish between these two cases, we examine the
	 * affinity state of the CPU:
	 *  - If the affinity state is ON_PENDING then it has just been
	 *    turned on.
	 *  - Else it is resuming from suspend.
	 *
	 * Depending on the type of warm reset identified, choose the right set
	 * of power management handlers and perform the generic, architectural
	 * and platform specific handling.
	 */
	if (psci_get_aff_info_state() == AFF_STATE_ON_PENDING)
		psci_cpu_on_finish(cpu_idx, &state_info);
	else
		psci_cpu_suspend_finish(cpu_idx, &state_info);

	/*
	 * Set the requested and target state of this CPU and all the higher
	 * power domains which are ancestors of this CPU to run.
	 */
	psci_set_pwr_domains_to_run(end_pwrlvl);

	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      cpu_idx);
}

/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs generic, architectural, platform setup
 * and state management required to turn OFF that power domain and domains
 * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
 * the power controller whereas for a cluster that's to be powered off, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also
 * program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
	psci_power_state_t state_info;

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off);
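
	/*
	 * For illustration only: a platform typically provides this hook via
	 * the plat_psci_ops_t structure it hands back to the PSCI library,
	 * e.g. (hypothetical platform code):
	 *
	 *	static const plat_psci_ops_t my_plat_psci_ops = {
	 *		.pwr_domain_off	= my_plat_pwr_domain_off,
	 *		...
	 *	};
	 *
	 * where my_plat_pwr_domain_off() would exit coherency and/or program
	 * the power controller for the levels that are turning off.
	 */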

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a snapshot of the
	 * system topology has been captured and state management can be
	 * done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			goto exit;
	}
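
	/*
	 * For illustration only: an SPD typically registers these hooks during
	 * its setup, e.g. (hypothetical dispatcher code):
	 *
	 *	static const spd_pm_ops_t my_spd_pm_ops = {
	 *		.svc_off	= my_spd_cpu_off_handler,
	 *		...
	 *	};
	 *	psci_register_spd_pm_hook(&my_spd_pm_ops);
	 *
	 * my_spd_cpu_off_handler() returns 0 to allow the power down, or an
	 * error code (assumed above to be E_DENIED) to refuse it.
	 */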

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level up to end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
	/*
	 * Capture time-stamp while entering low power state.
	 * No cache maintenance needed because caches are off
	 * and writes are direct to main memory.
	 */
	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. This writes directly to
		 * main memory as caches are disabled, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF.  A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		dsbish();
		inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off.  We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
			/* This function must not return */
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
		} else {
			/*
			 * Enter a wfi loop which will allow the power
			 * controller to physically power down this cpu.
			 */
			psci_power_down_wfi();
		}
	}

	return rc;
}