Example 1
/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and that the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
	unsigned int i;

	for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
			i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {

		assert(psci_aff_map[i].level == MPIDR_AFFLVL0);

		if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
			continue;

		if (psci_aff_map[i].mpidr == mpidr) {
			assert(psci_get_state(&psci_aff_map[i])
					== PSCI_STATE_ON);
			continue;
		}

		if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
			return 0;
	}

	return 1;
}
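A hedged usage sketch (the caller below is hypothetical, not taken from the sources): a SYSTEM_OFF style path could use psci_is_last_on_cpu() as a sanity check that no other core is still running before powering the platform down.

/*
 * Hypothetical caller, for illustration only: the last CPU left ON is the
 * only one allowed to shut the whole system down.
 */
static void system_off_check_example(void)
{
	/* Abort if any other core is still ON, ON_PENDING or SUSPENDed */
	assert(psci_is_last_on_cpu());

	/* ... the platform specific power-off sequence would follow here ... */
}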
Example 2
int psci_affinity_info(unsigned long target_affinity,
		       unsigned int lowest_affinity_level)
{
	int rc = PSCI_E_INVALID_PARAMS;
	unsigned int aff_state;
	aff_map_node *node;

	if (lowest_affinity_level > get_max_afflvl())
		return rc;

	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
	if (node && (node->state & PSCI_AFF_PRESENT)) {

		/*
		 * TODO: For affinity levels higher than 0, i.e. above the
		 * cpu level, the state will always be either ON or OFF.
		 * Need to investigate how critical it is to support
		 * ON_PENDING here.
		 */
		aff_state = psci_get_state(node);

		/* A suspended cpu is available & on for the OS */
		if (aff_state == PSCI_STATE_SUSPEND) {
			aff_state = PSCI_STATE_ON;
		}

		rc = aff_state;
	}

	return rc;
}
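A hedged sketch of one possible caller (the helper name and the polling loop are assumptions, not from the sources): waiting for a secondary core to report OFF, for example after a CPU_OFF request.

/*
 * Illustrative only: poll until the target core's affinity instance at the
 * cpu level reports OFF. target_mpidr is a hypothetical argument.
 */
static void wait_for_cpu_off_example(unsigned long target_mpidr)
{
	int state;

	do {
		state = psci_affinity_info(target_mpidr, MPIDR_AFFLVL0);

		/* A bad MPIDR or affinity level would loop forever */
		assert(state != PSCI_E_INVALID_PARAMS);
	} while (state != PSCI_STATE_OFF);
}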
Example 3
int psci_affinity_info(unsigned long target_affinity,
		       unsigned int lowest_affinity_level)
{
	int rc = PSCI_E_INVALID_PARAMS;
	unsigned int aff_state;
	aff_map_node *node;

	if (lowest_affinity_level > get_max_afflvl()) {
		goto exit;
	}

	node = psci_get_aff_map_node(target_affinity, lowest_affinity_level);
	if (node && (node->state & PSCI_AFF_PRESENT)) {
		aff_state = psci_get_state(node->state);

		/* A suspended cpu is available & on for the OS */
		if (aff_state == PSCI_STATE_SUSPEND) {
			aff_state = PSCI_STATE_ON;
		}

		rc = aff_state;
	}
exit:
	return rc;
}
Example 4
/*******************************************************************************
 * This function prints the state of all affinity instances present in the
 * system.
 ******************************************************************************/
void psci_print_affinity_map(void)
{
#if LOG_LEVEL >= LOG_LEVEL_INFO
	aff_map_node_t *node;
	unsigned int idx;
	/* This array maps to the PSCI_STATE_X definitions in psci.h */
	static const char *psci_state_str[] = {
		"ON",
		"OFF",
		"ON_PENDING",
		"SUSPEND"
	};

	INFO("PSCI Affinity Map:\n");
	for (idx = 0; idx < PSCI_NUM_AFFS ; idx++) {
		node = &psci_aff_map[idx];
		if (!(node->state & PSCI_AFF_PRESENT)) {
			continue;
		}
		INFO("  AffInst: Level %u, MPID 0x%lx, State %s\n",
				node->level, node->mpidr,
				psci_state_str[psci_get_state(node)]);
	}
#endif
}
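The string table above relies on the PSCI_STATE_* constants being the indices 0..3 in exactly that order. As an illustration only, a C11 build-time check could document that assumption; the values asserted here are not taken from psci.h, and the check simply fails the build if they disagree.

/*
 * Illustrative build-time check (not in the original file): psci_state_str[]
 * is only correct if the PSCI_STATE_* constants map to indices 0..3 in the
 * order ON, OFF, ON_PENDING, SUSPEND. Requires a C11 toolchain.
 */
_Static_assert((PSCI_STATE_ON == 0) && (PSCI_STATE_OFF == 1) &&
	       (PSCI_STATE_ON_PENDING == 2) && (PSCI_STATE_SUSPEND == 3),
	       "psci_state_str[] is out of sync with the PSCI_STATE_* values");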
Example 5
/*******************************************************************************
 * An affinity level could be on, on_pending, suspended or off. These are the
 * logical states it can be in. Physically either it is off or on. When it is in
 * the state on_pending then it is about to be turned on. It is not possible to
 * tell whether that has actually happened or not, so we err on the side of
 * caution and treat the affinity level as being turned off.
 ******************************************************************************/
unsigned short psci_get_phys_state(aff_map_node_t *node)
{
	unsigned int state;

	state = psci_get_state(node);
	return get_phys_state(state);
}
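A minimal sketch of what the get_phys_state() helper used above could look like, derived only from the comment. Treating SUSPEND as physically off is an assumption here; the real helper in psci_common.c may differ in detail.

/*
 * Sketch only: collapse the four logical states onto the two physical ones.
 * Anything that is not ON is conservatively reported as OFF, including
 * ON_PENDING (as described in the comment above) and, by assumption, SUSPEND.
 */
static unsigned short get_phys_state_sketch(unsigned int state)
{
	return (state == PSCI_STATE_ON) ? PSCI_STATE_ON : PSCI_STATE_OFF;
}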
Example 6
/*******************************************************************************
 * Routine to return the maximum affinity level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
int get_power_on_target_afflvl(void)
{
	int afflvl;

#if DEBUG
	unsigned int state;
	aff_map_node_t *node;

	/* Retrieve our node from the topology tree */
	node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				     MPIDR_AFFLVL0);
	assert(node);

	/*
	 * Sanity check the state of the cpu. It should be either SUSPEND or
	 * ON_PENDING.
	 */
	state = psci_get_state(node);
	assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
#endif

	/*
	 * Assume that this cpu was suspended and retrieve its target affinity
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLATFORM_MAX_AFFLVL will be the highest affinity level a
	 * cpu can be turned off to.
	 */
	afflvl = psci_get_suspend_afflvl();
	if (afflvl == PSCI_INVALID_DATA)
		afflvl = PLATFORM_MAX_AFFLVL;
	return afflvl;
}
/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been explicitly woken up by another cpu */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	if (psci_plat_pm_ops->affinst_on_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_on_finish(read_mpidr_el1(),
							 cpu_node->level,
							 plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}
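A hypothetical platform hook (not taken from a real port), with its shape inferred from the call site above, to show what the affinst_on_finish callback is expected to do at the cpu level.

/*
 * Illustrative platform handler: enable whatever the core needs to receive
 * interrupts and clear the mechanism that was used to release it. The body
 * here is a placeholder; only the signature is inferred from the call above.
 */
static int plat_example_affinst_on_finish(unsigned long mpidr,
					  unsigned int afflvl,
					  unsigned int state)
{
	/* Nothing to do for levels above the cpu in this sketch */
	if (afflvl != MPIDR_AFFLVL0)
		return PSCI_E_SUCCESS;

	/* e.g. enable the GIC CPU interface, clear the mailbox for 'mpidr' */

	return PSCI_E_SUCCESS;
}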
/*******************************************************************************
 * This function checks whether a cpu which has been requested to be turned on
 * is OFF to begin with.
 ******************************************************************************/
static int cpu_on_validate_state(aff_map_node_t *node)
{
	unsigned int psci_state;

	/* Get the raw psci state */
	psci_state = psci_get_state(node);

	if (psci_state == PSCI_STATE_ON || psci_state == PSCI_STATE_SUSPEND)
		return PSCI_E_ALREADY_ON;

	if (psci_state == PSCI_STATE_ON_PENDING)
		return PSCI_E_ON_PENDING;

	assert(psci_state == PSCI_STATE_OFF);
	return PSCI_E_SUCCESS;
}
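A simplified, illustrative fragment (not the full CPU_ON implementation) showing how a power-on request would consume the validation result above before doing any work.

/*
 * Sketch only: a CPU_ON request path first validates the target's current
 * state and propagates the error code unchanged on failure.
 */
static int cpu_on_example(aff_map_node_t *target_node)
{
	int rc = cpu_on_validate_state(target_node);

	if (rc != PSCI_E_SUCCESS)
		return rc;	/* PSCI_E_ALREADY_ON or PSCI_E_ON_PENDING */

	/* ... mark the target ON_PENDING and start the platform power-on ... */
	return PSCI_E_SUCCESS;
}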
/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;
	int32_t suspend_level;
	uint64_t counter_freq;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been woken up from a suspended state */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
							      cpu_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend) {
		suspend_level = psci_get_suspend_afflvl();
		assert(suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}