Code example #1
static int psci_afflvl1_off(aff_map_node_t *cluster_node)
{
	int rc;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	if (!psci_plat_pm_ops->affinst_off)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. Management. Allow the platform to do its cluster
	 * specific bookkeeping e.g. turn off interconnect coherency,
	 * program the power controller etc.
	 */
	rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
					     cluster_node->level,
					     psci_get_phys_state(cluster_node));

	/*
	 * Arch. Management. Flush all levels of caches to PoC if
	 * the cluster is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);

	return rc;
}
Code example #2
static int psci_afflvl2_off(aff_map_node_t *system_node)
{
	int rc;

	/* Cannot go beyond this level */
	assert(system_node->level == MPIDR_AFFLVL2);

	if (!psci_plat_pm_ops->affinst_off)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. Management: Allow the platform to do its bookkeeping
	 * at this affinity level
	 */
	rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
					     system_node->level,
					     psci_get_phys_state(system_node));

	/*
	 * Arch. Management. Flush all levels of caches to PoC if
	 * the system is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);

	return rc;
}
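In the affinity-level based implementation, per-level handlers like the two above (together with the level-0 handler in code example #4) are invoked one after another for every level being turned off. The following is a minimal, self-contained model of that dispatch pattern, offered only as a sketch: every model_ prefixed identifier is a placeholder standing in for the real handlers and dispatcher, and the handler table plus ascending walk is an assumption about how such handlers are wired together, not a copy of the upstream code.

/*
 * Minimal, self-contained model of per-level off dispatch: walk from
 * MPIDR_AFFLVL0 up to the highest affinity level being turned off and
 * call the handler registered for each level. All model_ identifiers
 * are placeholders, not upstream names.
 */
#include <assert.h>
#include <stddef.h>

#define MPIDR_AFFLVL0	0
#define MPIDR_AFFLVL1	1
#define MPIDR_AFFLVL2	2
#define PSCI_E_SUCCESS	0

typedef struct model_aff_node {
	int level;
} model_aff_node_t;

typedef int (*model_off_handler_t)(model_aff_node_t *node);

/* Stand-in for the psci_afflvl{0,1,2}_off handlers shown in the examples */
static int model_level_off(model_aff_node_t *node)
{
	assert(node != NULL);
	return PSCI_E_SUCCESS;
}

static const model_off_handler_t model_off_handlers[] = {
	model_level_off,	/* MPIDR_AFFLVL0: cpu */
	model_level_off,	/* MPIDR_AFFLVL1: cluster */
	model_level_off,	/* MPIDR_AFFLVL2: system */
};

/* Cpu level first, then cluster, then system; stop on the first error */
static int model_call_off_handlers(model_aff_node_t *nodes[], int end_afflvl)
{
	int level, rc = PSCI_E_SUCCESS;

	for (level = MPIDR_AFFLVL0; level <= end_afflvl; level++) {
		rc = model_off_handlers[level](nodes[level]);
		if (rc != PSCI_E_SUCCESS)
			break;
	}

	return rc;
}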
Code example #3
static int psci_afflvl2_suspend(aff_map_node_t *system_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;
	int rc;

	/* Cannot go beyond this */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Keep the physical state of the system handy to decide what
	 * action needs to be taken
	 */
	plat_state = psci_get_phys_state(system_node);

	/*
	 * Plat. Management: Allow the platform to do its bookkeeping
	 * at this affinity level
	 */
	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Sending the psci entrypoint is currently redundant
	 * beyond affinity level 0 but one never knows what a
	 * platform might do. Also it allows us to keep the
	 * platform handler prototype the same.
	 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						 psci_entrypoint,
						 ns_entrypoint,
						 system_node->level,
						 plat_state);
	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * system is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);

	return rc;
}
Code example #4
/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is turned off.
 ******************************************************************************/
static int psci_afflvl0_off(aff_map_node_t *cpu_node)
{
	int rc;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			return rc;
	}

	if (!psci_plat_pm_ops->affinst_off)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
					     cpu_node->level,
					     psci_get_phys_state(cpu_node));
	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);

	return rc;
}
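The psci_spd_pm->svc_off(0) call above (and the svc_suspend(power_state) call in code example #6) hands control briefly to the Secure Payload Dispatcher before the power down proceeds. Below is a hedged sketch of what such SPD hooks might look like, matching only the way they are invoked here: svc_off returns a status where non-zero aborts the power down, svc_suspend returns nothing. The my_spd_* and my_sp_* names and the busy check are hypothetical; the PSCI_E_DENIED value is the DENIED error code from the PSCI specification.

/*
 * Hypothetical Secure Payload Dispatcher hooks matching the call sites
 * above. Everything prefixed my_ is a placeholder.
 */
#include <stdint.h>
#include <stdbool.h>

#define PSCI_E_SUCCESS	 0
#define PSCI_E_DENIED	-3	/* DENIED error code from the PSCI spec */

static bool my_sp_is_busy(void) { return false; }	/* placeholder SP state query */
static void my_sp_save_context(uint64_t power_state) { (void)power_state; }

/* Would be wired up as the SPD's cpu off hook; non-zero refuses the CPU_OFF */
static int32_t my_spd_svc_off(uint64_t unused)
{
	(void)unused;

	/* Refuse to power down while the Secure Payload is mid-operation */
	if (my_sp_is_busy())
		return PSCI_E_DENIED;

	return PSCI_E_SUCCESS;
}

/* Would be wired up as the SPD's cpu suspend hook; errors assert internally */
static void my_spd_svc_suspend(uint64_t power_state)
{
	my_sp_save_context(power_state);
}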
Code example #5
static int psci_afflvl1_suspend(aff_map_node_t *cluster_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;
	int rc;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. Management. Allow the platform to do its cluster specific
	 * bookkeeping e.g. turn off interconnect coherency, program the power
	 * controller etc. Sending the psci entrypoint is currently redundant
	 * beyond affinity level 0 but one never knows what a platform might
	 * do. Also it allows us to keep the platform handler prototype the
	 * same.
	 */
	plat_state = psci_get_phys_state(cluster_node);
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						 psci_entrypoint,
						 ns_entrypoint,
						 cluster_node->level,
						 plat_state);
	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * cluster is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);

	return rc;
}
Code example #6
/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is about to be suspended.
 ******************************************************************************/
static int psci_afflvl0_suspend(aff_map_node_t *cpu_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned long psci_entrypoint;
	uint32_t ns_scr_el3 = read_scr_el3();
	uint32_t ns_sctlr_el1 = read_sctlr_el1();
	int rc;

	/* Sanity check to safeguard against data corruption */
	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Save PSCI power state parameter for the core in suspend context */
	psci_set_suspend_power_state(power_state);

	/*
	 * Call the cpu suspend handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters an
	 * error, it's expected to assert within
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend)
		psci_spd_pm->svc_suspend(power_state);

	/*
	 * Generic management: Store the re-entry information for the
	 * non-secure world
	 */
	rc = psci_save_ns_entry(read_mpidr_el1(), ns_entrypoint, context_id,
				ns_scr_el3, ns_sctlr_el1);
	if (rc != PSCI_E_SUCCESS)
		return rc;

	/* Set the secure world (EL3) re-entry point after BL1 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;

	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. management: Allow the platform to perform the
	 * necessary actions to turn off this cpu e.g. set the
	 * platform defined mailbox with the psci entrypoint,
	 * program the power controller etc.
	 */
	rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						 psci_entrypoint,
						 ns_entrypoint,
						 cpu_node->level,
						 psci_get_phys_state(cpu_node));

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);

	return rc;
}
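On the platform side, the affinst_suspend hook called above receives the EL3 warm-boot entry point (psci_entrypoint) alongside the non-secure entry point, the affinity level and the physical state. The sketch below shows one plausible shape for such a hook, assuming a mailbox-style warm-boot path; only the five-parameter signature is taken from the call sites above, while the mailbox address, state value and helper names are invented for illustration.

/*
 * Hypothetical platform implementation of the affinst_suspend hook.
 * Only the parameter list mirrors the call sites above; MY_PLAT_* and
 * my_plat_* identifiers are placeholders.
 */
#include <stdint.h>

#define PSCI_E_SUCCESS		0
#define MY_PLAT_STATE_OFF	2		/* assumed psci_get_phys_state() value */
#define MY_PLAT_MBOX_BASE	0x04000000ULL	/* assumed warm-boot mailbox address */

static void my_plat_pwrc_enter_suspend(unsigned long mpidr) { (void)mpidr; }

static int my_plat_affinst_suspend(unsigned long mpidr,
				   unsigned long sec_entrypoint,
				   unsigned long ns_entrypoint,
				   unsigned int afflvl,
				   unsigned int state)
{
	(void)ns_entrypoint;

	/* Nothing to do unless this affinity instance is really going down */
	if (state != MY_PLAT_STATE_OFF)
		return PSCI_E_SUCCESS;

	/*
	 * Publish the EL3 warm-boot entry point (psci_aff_suspend_finish_entry
	 * in the callers above) so the boot path can jump back into the PSCI
	 * suspend finisher on wakeup.
	 */
	if (afflvl == 0)	/* MPIDR_AFFLVL0 */
		*(volatile uint64_t *)MY_PLAT_MBOX_BASE = sec_entrypoint;

	/* Arm the power controller so the later wfi powers this cpu down */
	my_plat_pwrc_enter_suspend(mpidr);

	return PSCI_E_SUCCESS;
}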
Code example #7
/******************************************************************************
 * Top level handler which is called when a cpu wants to power itself down.
 * It's assumed that along with turning the cpu power domain off, power
 * domains at higher levels will be turned off as far as possible. It finds
 * the highest level where a domain has to be powered off by traversing the
 * node information and then performs generic, architectural, platform setup
 * and state management required to turn OFF that power domain and domains
 * below it. e.g. For a cpu that's to be powered OFF, it could mean programming
 * the power controller whereas for a cluster that's to be powered off, it will
 * call the platform specific code which will disable coherency at the
 * interconnect level if the cpu is the last in the cluster and also
 * program the power controller.
 ******************************************************************************/
int psci_do_cpu_off(unsigned int end_pwrlvl)
{
	int rc = PSCI_E_SUCCESS, idx = plat_my_core_pos();
	psci_power_state_t state_info;

	/*
	 * This function must only be called on platforms where the
	 * CPU_OFF platform hooks have been implemented.
	 */
	assert(psci_plat_pm_ops->pwr_domain_off);

	/*
	 * This function acquires the lock corresponding to each power
	 * level so that by the time all locks are taken, a consistent
	 * snapshot of the system topology has been captured and state
	 * management can be done safely.
	 */
	psci_acquire_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			goto exit;
	}

	/* Construct the psci_power_state for CPU_OFF */
	psci_set_power_off_state(&state_info);

	/*
	 * This function is passed the requested state info and
	 * it returns the negotiated state info for each power level up to
	 * the end level specified.
	 */
	psci_do_state_coordination(end_pwrlvl, &state_info);

#if ENABLE_PSCI_STAT
	/* Update the last cpu for each level till end_pwrlvl */
	psci_stats_update_pwr_down(end_pwrlvl, &state_info);
#endif

	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(psci_find_max_off_lvl(&state_info));

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	psci_plat_pm_ops->pwr_domain_off(&state_info);

#if ENABLE_PSCI_STAT
	/*
	 * Capture time-stamp while entering low power state.
	 * No cache maintenance needed because caches are off
	 * and writes are direct to main memory.
	 */
	PMF_CAPTURE_TIMESTAMP(psci_svc, PSCI_STAT_ID_ENTER_LOW_PWR,
		PMF_NO_CACHE_MAINT);
#endif

exit:
	/*
	 * Release the locks corresponding to each power level in the
	 * reverse order to which they were acquired.
	 */
	psci_release_pwr_domain_locks(end_pwrlvl,
				      idx);

	/*
	 * Check if all actions needed to safely power down this cpu have
	 * successfully completed.
	 */
	if (rc == PSCI_E_SUCCESS) {
		/*
		 * Set the affinity info state to OFF. This writes directly to
		 * main memory as caches are disabled, so cache maintenance is
		 * required to ensure that later cached reads of aff_info_state
		 * return AFF_STATE_OFF.  A dsbish() ensures ordering of the
		 * update to the affinity info state prior to cache line
		 * invalidation.
		 */
		flush_cpu_data(psci_svc_cpu_data.aff_info_state);
		psci_set_aff_info_state(AFF_STATE_OFF);
		dsbish();
		inv_cpu_data(psci_svc_cpu_data.aff_info_state);

#if ENABLE_RUNTIME_INSTRUMENTATION

		/*
		 * Update the timestamp with cache off.  We assume this
		 * timestamp can only be read from the current CPU and the
		 * timestamp cache line will be flushed before return to
		 * normal world on wakeup.
		 */
		PMF_CAPTURE_TIMESTAMP(rt_instr_svc,
		    RT_INSTR_ENTER_HW_LOW_PWR,
		    PMF_NO_CACHE_MAINT);
#endif

		if (psci_plat_pm_ops->pwr_domain_pwr_down_wfi) {
			/* This function must not return */
			psci_plat_pm_ops->pwr_domain_pwr_down_wfi(&state_info);
		} else {
			/*
			 * Enter a wfi loop which will allow the power
			 * controller to physically power down this cpu.
			 */
			psci_power_down_wfi();
		}
	}

	return rc;
}
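Code example #7 is from the newer power-domain based implementation, where the platform supplies pwr_domain_off and, optionally, pwr_domain_pwr_down_wfi, both invoked above with &state_info. A minimal sketch of those two hooks is given below; the state type is kept opaque and all my_plat_* helpers, as well as the wfi loop, are illustrative assumptions rather than the upstream porting API.

/*
 * Sketch of the two platform hooks invoked by psci_do_cpu_off() above,
 * based only on how they are called there. my_psci_power_state_t stands
 * in for psci_power_state_t and is treated as opaque; my_plat_* helpers
 * are placeholders.
 */
typedef struct my_psci_power_state my_psci_power_state_t;

static void my_plat_exit_coherency(void) { /* take this cpu out of coherency */ }
static void my_plat_pwrc_cpu_off(void)   { /* program the power controller */ }

/* Required hook (see the assert at the top of psci_do_cpu_off()); runs
 * after cache maintenance and before the final wfi. */
static void my_plat_pwr_domain_off(const my_psci_power_state_t *target_state)
{
	(void)target_state;
	my_plat_exit_coherency();
	my_plat_pwrc_cpu_off();
}

/*
 * Optional hook: platforms that need a custom power-down sequence instead
 * of the generic psci_power_down_wfi() loop provide this; it must not
 * return, matching the "This function must not return" comment above.
 */
static void __attribute__((noreturn))
my_plat_pwr_down_wfi(const my_psci_power_state_t *target_state)
{
	(void)target_state;

	/* Wait for the power controller to physically power this cpu down */
	for (;;)
		__asm__ volatile("wfi");
}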