Code Example #1: psci_validate_suspend_req()
/******************************************************************************
 * This function validates a suspend request by making sure that if a standby
 * state is requested then no power level is turned off and the highest power
 * level is placed in a standby/retention state.
 *
 * It also ensures that the state that level X will enter is not shallower
 * than the state that level X + 1 will enter.
 *
 * This validation will be enabled only for DEBUG builds as the platform is
 * expected to perform these validations as well.
 *****************************************************************************/
int psci_validate_suspend_req(const psci_power_state_t *state_info,
			      unsigned int is_power_down_state)
{
	unsigned int max_off_lvl, target_lvl, max_retn_lvl;
	plat_local_state_t state;
	plat_local_state_type_t req_state_type, deepest_state_type;
	int i;

	/* Find the target suspend power level */
	target_lvl = psci_find_target_suspend_lvl(state_info);
	if (target_lvl == PSCI_INVALID_PWR_LVL)
		return PSCI_E_INVALID_PARAMS;

	/* All power domain levels are in a RUN state to begin with */
	deepest_state_type = STATE_TYPE_RUN;

	for (i = target_lvl; i >= PSCI_CPU_PWR_LVL; i--) {
		state = state_info->pwr_domain_state[i];
		req_state_type = find_local_state_type(state);

		/*
		 * While traversing from the highest power level to the lowest,
		 * the state requested for lower levels has to be the same or
		 * deeper i.e. equal to or greater than the state at the higher
		 * levels. If this condition is true, then the requested state
		 * becomes the deepest state encountered so far.
		 */
		if (req_state_type < deepest_state_type)
			return PSCI_E_INVALID_PARAMS;
		deepest_state_type = req_state_type;
	}

	/* Find the highest off power level */
	max_off_lvl = psci_find_max_off_lvl(state_info);

	/* The target_lvl is either equal to the max_off_lvl or max_retn_lvl */
	max_retn_lvl = PSCI_INVALID_PWR_LVL;
	if (target_lvl != max_off_lvl)
		max_retn_lvl = target_lvl;

	/*
	 * If this is not a request for a power down state then max off level
	 * has to be invalid and max retention level has to be a valid power
	 * level.
	 */
	if (!is_power_down_state && (max_off_lvl != PSCI_INVALID_PWR_LVL ||
				    max_retn_lvl == PSCI_INVALID_PWR_LVL))
		return PSCI_E_INVALID_PARAMS;

	return PSCI_E_SUCCESS;
}
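
A minimal usage sketch for this validation is shown below. It is illustrative only: the helper name validate_cpu_retention_request and the platform retention state value PLAT_RET_STATE are assumptions, while psci_power_state_t, PSCI_CPU_PWR_LVL and psci_validate_suspend_req come from the code above. The sketch builds a composite state requesting retention at the CPU level only and validates it as a standby (non-power-down) request.

/*
 * Illustrative sketch only: request a retention state at the CPU power
 * level, leave all higher levels in RUN (0), and validate it as a
 * standby request. PLAT_RET_STATE is an assumed platform-specific
 * retention state value, not part of the code above.
 */
static int validate_cpu_retention_request(void)
{
	psci_power_state_t state_info = { {0} };

	/* CPU level requests retention; higher levels stay in RUN */
	state_info.pwr_domain_state[PSCI_CPU_PWR_LVL] = PLAT_RET_STATE;

	/* is_power_down_state == 0: only retention levels may be requested */
	return psci_validate_suspend_req(&state_info, 0U);
}
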
Code Example #2: psci_do_cpu_off()
/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs generic, architectural, platform setup
 * and state management required to turn OFF that power domain and domains
 * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
 * the power controller whereas for a cluster that's to be powered off, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster, and also
 * program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
	psci_power_state_t state_info;

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a consistent snapshot
	 * of the system topology is held and state management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			goto exit;
	}

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
	/*
	 * Capture time-stamp while entering low power state.
	 * No cache maintenance needed because caches are off
	 * and writes are direct to main memory.
	 */
	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. This writes directly to
		 * main memory as caches are disabled, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF.  A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		dsbish();
		inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off.  We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
			/* This function must not return */
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
		} else {
			/*
			 * Enter a wfi loop which will allow the power
			 * controller to physically power down this cpu.
			 */
			psci_power_down_wfi();
		}
	}

	return rc;
}
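
For context, the sketch below shows roughly how a top-level CPU_OFF entry point might call psci_do_cpu_off. It is not the actual entry point: the handler name cpu_off_handler_sketch is hypothetical, PLAT_MAX_PWR_LVL is assumed to be the platform's highest power level, and the real error handling is omitted.

/*
 * Illustrative sketch only: ask the PSCI layer to power this CPU off,
 * together with any higher power domains for which it is the last
 * running core. If every step succeeds, psci_do_cpu_off() does not
 * return: the core enters the final wfi and is powered down. A return
 * therefore reports an error, e.g. PSCI_E_DENIED when the Secure
 * Payload Dispatcher rejects the request.
 */
static int cpu_off_handler_sketch(void)
{
	/* PLAT_MAX_PWR_LVL: assumed platform-defined top power level */
	return psci_do_cpu_off(PLAT_MAX_PWR_LVL);
}
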