void hikey960_pwr_domain_off(const psci_power_state_t *target_state)
{
	unsigned long mpidr = read_mpidr_el1();
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	clr_ex();
	isb();
	dsbsy();

	gicv2_cpuif_disable();

	hisi_clear_cpu_boot_flag(cluster, core);
	hisi_powerdn_core(cluster, core);

	/* check if all other cores in the cluster are powered down */
	if (hisi_test_cpu_down(cluster, core)) {

		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));

		isb();
		dsbsy();

		hisi_powerdn_cluster(cluster, core);
	}
}
static void hikey960_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	u_register_t mpidr = read_mpidr_el1();
	unsigned int core = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	if (CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		clr_ex();
		isb();
		dsbsy();

		gicv2_cpuif_disable();

		hisi_cpuidle_lock(cluster, core);
		hisi_set_cpuidle_flag(cluster, core);
		hisi_cpuidle_unlock(cluster, core);

		mmio_write_32(CRG_REG_BASE + CRG_RVBAR(cluster, core),
			      hikey960_sec_entrypoint >> 2);

		hisi_enter_core_idle(cluster, core);
	}
}
/*******************************************************************************
 * This function verifies that all the other cores in the system have been
 * turned OFF and the current CPU is the last running CPU in the system.
 * Returns 1 (true) if the current CPU is the last ON CPU or 0 (false)
 * otherwise.
 ******************************************************************************/
unsigned int psci_is_last_on_cpu(void)
{
	unsigned long mpidr = read_mpidr_el1() & MPIDR_AFFINITY_MASK;
	unsigned int i;

	for (i = psci_aff_limits[MPIDR_AFFLVL0].min;
			i <= psci_aff_limits[MPIDR_AFFLVL0].max; i++) {

		assert(psci_aff_map[i].level == MPIDR_AFFLVL0);

		if (!(psci_aff_map[i].state & PSCI_AFF_PRESENT))
			continue;

		if (psci_aff_map[i].mpidr == mpidr) {
			assert(psci_get_state(&psci_aff_map[i])
					== PSCI_STATE_ON);
			continue;
		}

		if (psci_get_state(&psci_aff_map[i]) != PSCI_STATE_OFF)
			return 0;
	}

	return 1;
}
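/*
 * Usage sketch (hypothetical, not from this source): a platform
 * SYSTEM_SUSPEND validation path could use psci_is_last_on_cpu() to
 * refuse entry unless every other CPU is already OFF.
 */
static int plat_validate_system_suspend(void)
{
	/* Only the last running CPU may take the whole system down */
	if (!psci_is_last_on_cpu())
		return PSCI_E_DENIED;

	return PSCI_E_SUCCESS;
}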
static unsigned int psci_afflvl2_on_finish(aff_map_node_t *system_node)
{
	unsigned int plat_state;

	/* Cannot go beyond this affinity level */
	assert(system_node->level == MPIDR_AFFLVL2);

	if (!psci_plat_pm_ops->affinst_on_finish)
		return PSCI_E_SUCCESS;

	/*
	 * Currently, there are no architectural actions to perform
	 * at the system level.
	 */

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the system e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which it was powered up. If anything goes wrong then
	 * assert as there is no way to recover from this situation.
	 */
	plat_state = psci_get_phys_state(system_node);
	return psci_plat_pm_ops->affinst_on_finish(read_mpidr_el1(),
						   system_node->level,
						   plat_state);
}
/*
 * Program Priority Mask to the original Non-secure priority such that
 * Non-secure interrupts may preempt Secure execution, viz. during Yielding SMC
 * calls. The 'preempt_ret_code' parameter indicates the Yielding SMC's return
 * value in case the call was preempted.
 *
 * This API is expected to be invoked before delegating a yielding SMC to Secure
 * EL1. I.e. within the window of secure execution after Non-secure context is
 * saved (after entry into EL3) and Secure context is restored (before entering
 * Secure EL1).
 */
void ehf_allow_ns_preemption(uint64_t preempt_ret_code)
{
	cpu_context_t *ns_ctx;
	unsigned int old_pmr __unused;
	pe_exc_data_t *pe_data = this_cpu_data();

	/*
	 * We should have been notified earlier of entering secure world, and
	 * therefore have stashed the Non-secure priority mask.
	 */
	assert(pe_data->ns_pri_mask != 0);

	/* Make sure no priority levels are active when requesting this */
	if (has_valid_pri_activations(pe_data)) {
		ERROR("PE %lx has priority activations: 0x%x\n",
				read_mpidr_el1(), pe_data->active_pri_bits);
		panic();
	}

	/*
	 * Program preempted return code to x0 right away so that, if the
	 * Yielding SMC was indeed preempted before a dispatcher gets a chance
	 * to populate it, the caller would find the correct return value.
	 */
	ns_ctx = cm_get_context(NON_SECURE);
	assert(ns_ctx);
	write_ctx_reg(get_gpregs_ctx(ns_ctx), CTX_GPREG_X0, preempt_ret_code);

	old_pmr = plat_ic_set_priority_mask(pe_data->ns_pri_mask);

	EHF_LOG("Priority Mask: 0x%x => 0x%x\n", old_pmr, pe_data->ns_pri_mask);

	pe_data->ns_pri_mask = 0;
}
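/*
 * Usage sketch (hypothetical dispatcher code): before delegating a
 * Yielding SMC to Secure EL1, allow Non-secure interrupts to preempt it,
 * seeding x0 with the return code the Non-secure caller should see on
 * preemption. SMC_PREEMPTED is assumed to be the dispatcher's code.
 */
static void spd_enter_yielding_smc(void)
{
	/* From here on, NS interrupts may preempt Secure execution */
	ehf_allow_ns_preemption(SMC_PREEMPTED);

	/* ... restore the Secure context and ERET into Secure EL1 ... */
}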
static void hikey_pwr_domain_suspend(const psci_power_state_t *target_state)
{
	u_register_t mpidr = read_mpidr_el1();
	unsigned int cpu = mpidr & MPIDR_CPU_MASK;
	unsigned int cluster =
		(mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFFINITY_BITS;

	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	if (CORE_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {

		/* Program the jump address for the target cpu */
		hisi_pwrc_set_core_bx_addr(cpu, cluster, hikey_sec_entrypoint);

		gicv2_cpuif_disable();

		if (SYSTEM_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
			hisi_ipc_cpu_suspend(cpu, cluster);
	}

	/* Perform the common cluster specific operations */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		hisi_ipc_spin_lock(HISI_IPC_SEM_CPUIDLE);
		cci_disable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));
		hisi_ipc_spin_unlock(HISI_IPC_SEM_CPUIDLE);

		if (SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
			hisi_pwrc_set_cluster_wfi(1);
			hisi_pwrc_set_cluster_wfi(0);
			hisi_ipc_psci_system_off();
		} else
			hisi_ipc_cluster_suspend(cpu, cluster);
	}
}
static unsigned int psci_afflvl1_suspend_finish(aff_map_node_t *cluster_node)
{
	unsigned int plat_state, rc = PSCI_E_SUCCESS;

	assert(cluster_node->level == MPIDR_AFFLVL1);

	/*
	 * Plat. management: Perform the platform specific actions
	 * as per the old state of the cluster e.g. enabling
	 * coherency at the interconnect depends upon the state with
	 * which this cluster was powered up. If anything goes wrong
	 * then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cpu */
		plat_state = psci_get_phys_state(cluster_node);
		rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
							      cluster_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	return rc;
}
static int psci_afflvl2_off(aff_map_node_t *system_node)
{
	int rc;

	/* Cannot go beyond this level */
	assert(system_node->level == MPIDR_AFFLVL2);

	if (!psci_plat_pm_ops->affinst_off)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. Management : Allow the platform to do its bookkeeping
	 * at this affinity level
	 */
	rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
					     system_node->level,
					     psci_get_phys_state(system_node));

	/*
	 * Arch. Management. Flush all levels of caches to PoC if
	 * the system is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);

	return rc;
}
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	rockchip_pd_lock_init();
	plat_setup_rockchip_pm_ops(&pm_ops);

	/* the warm boot address register is only 32 bits wide, so truncate to 32 bits */
	cpu_warm_boot_addr = (uint64_t)platform_cpu_warmboot;

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_sleep_cfg->ddr_func = 0x00;
	psram_sleep_cfg->ddr_data = 0x00;
	psram_sleep_cfg->ddr_flag = 0x00;
	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	/* cpu boot from pmusram */
	mmio_write_32(SGRF_BASE + SGRF_SOC_CON0_1(1),
		      (cpu_warm_boot_addr >> CPU_BOOT_ADDR_ALIGN) |
		      CPU_BOOT_ADDR_WMASK);

	nonboot_cpus_off();

	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}
/*******************************************************************************
 * Routine to return the maximum affinity level to traverse to after a cpu has
 * been physically powered up. It is expected to be called immediately after
 * reset from assembler code.
 ******************************************************************************/
int get_power_on_target_afflvl(void)
{
	int afflvl;

#if DEBUG
	unsigned int state;
	aff_map_node_t *node;

	/* Retrieve our node from the topology tree */
	node = psci_get_aff_map_node(read_mpidr_el1() & MPIDR_AFFINITY_MASK,
				     MPIDR_AFFLVL0);
	assert(node);

	/*
	 * Sanity check the state of the cpu. It should be either suspend or "on
	 * pending"
	 */
	state = psci_get_state(node);
	assert(state == PSCI_STATE_SUSPEND || state == PSCI_STATE_ON_PENDING);
#endif

	/*
	 * Assume that this cpu was suspended and retrieve its target affinity
	 * level. If it is invalid then it could only have been turned off
	 * earlier. PLATFORM_MAX_AFFLVL will be the highest affinity level a
	 * cpu can be turned off to.
	 */
	afflvl = psci_get_suspend_afflvl();
	if (afflvl == PSCI_INVALID_DATA)
		afflvl = PLATFORM_MAX_AFFLVL;
	return afflvl;
}
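/*
 * Sketch (a hypothetical C rendering of the assembler caller): the level
 * returned here bounds which "on finish" handlers run after power-on.
 * psci_power_up_finish() is an assumed helper, not a symbol from the
 * source above.
 */
static void warm_boot_c_entry(void)
{
	int end_afflvl = get_power_on_target_afflvl();

	/* Finish power-on from the cpu level up to end_afflvl */
	psci_power_up_finish(MPIDR_AFFLVL0, end_afflvl);
}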
static void sq_power_down_common(const psci_power_state_t *target_state)
{
	uint32_t cluster_state = scpi_power_on;
	uint32_t system_state = scpi_power_on;

	/* Prevent interrupts from spuriously waking up this cpu */
	sq_gic_cpuif_disable();

	/* Check if power down at system power domain level is requested */
	if (SQ_SYSTEM_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF)
		system_state = scpi_power_retention;

	/* Cluster is to be turned off, so disable coherency */
	if (SQ_CLUSTER_PWR_STATE(target_state) == SQ_LOCAL_STATE_OFF) {
		plat_sq_interconnect_exit_coherency();
		cluster_state = scpi_power_off;
	}

	/*
	 * Ask the SCP to power down the appropriate components depending upon
	 * their state.
	 */
	scpi_set_sq_power_state(read_mpidr_el1(),
				 scpi_power_off,
				 cluster_state,
				 system_state);
}
/*******************************************************************************
 * This function prepares the boot arguments for the kernel entry point
 ******************************************************************************/
void bl31_prepare_kernel_entry(uint64_t k32_64)
{
	entry_point_info_t *next_image_info;
	uint32_t image_type;

	/* Determine which image to execute next */
	/* image_type = bl31_get_next_image_type(); */
	image_type = NON_SECURE;

	/* Program EL3 registers to enable entry into the next EL */
	if (k32_64 == 0)
		next_image_info = bl31_plat_get_next_kernel32_ep_info();
	else
		next_image_info = bl31_plat_get_next_kernel64_ep_info();

	assert(next_image_info);
	assert(image_type == GET_SECURITY_STATE(next_image_info->h.attr));

	INFO("BL3-1: Preparing for EL3 exit to %s world, Kernel\n",
		(image_type == SECURE) ? "secure" : "normal");
	INFO("BL3-1: Next image address = 0x%llx\n",
		(unsigned long long) next_image_info->pc);
	INFO("BL3-1: Next image spsr = 0x%x\n", next_image_info->spsr);
	cm_init_context(read_mpidr_el1(), next_image_info);
	cm_prepare_el3_exit(image_type);
}
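/*
 * Usage sketch (hypothetical SiP handler): a platform SMC requesting a
 * kernel boot would pick the register width and prepare the EL3 exit;
 * the actual ERET happens on the normal SMC return path.
 */
static void sip_boot_kernel(uint64_t is_64bit)
{
	/* Populates the Non-secure context with the kernel entry point */
	bl31_prepare_kernel_entry(is_64bit);
}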
static int psci_afflvl1_off(aff_map_node_t *cluster_node)
{
	int rc;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	if (!psci_plat_pm_ops->affinst_off)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. Management. Allow the platform to do its cluster
	 * specific bookkeeping e.g. turn off interconnect coherency,
	 * program the power controller etc.
	 */
	rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
					     cluster_node->level,
					     psci_get_phys_state(cluster_node));

	/*
	 * Arch. Management. Flush all levels of caches to PoC if
	 * the cluster is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);

	return rc;
}
/*******************************************************************************
 * STM32MP1 handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 * Called by core 0 to activate core 1.
 ******************************************************************************/
static int stm32_pwr_domain_on(u_register_t mpidr)
{
	unsigned long current_cpu_mpidr = read_mpidr_el1();
	uint32_t bkpr_core1_addr =
		tamp_bkpr(BOOT_API_CORE1_BRANCH_ADDRESS_TAMP_BCK_REG_IDX);
	uint32_t bkpr_core1_magic =
		tamp_bkpr(BOOT_API_CORE1_MAGIC_NUMBER_TAMP_BCK_REG_IDX);

	if (mpidr == current_cpu_mpidr) {
		return PSCI_E_INVALID_PARAMS;
	}

	if ((stm32_sec_entrypoint < STM32MP_SYSRAM_BASE) ||
	    (stm32_sec_entrypoint > (STM32MP_SYSRAM_BASE +
				     (STM32MP_SYSRAM_SIZE - 1)))) {
		return PSCI_E_INVALID_ADDRESS;
	}

	stm32mp_clk_enable(RTCAPB);

	cntfrq_core0 = read_cntfrq_el0();

	/* Write entrypoint in backup RAM register */
	mmio_write_32(bkpr_core1_addr, stm32_sec_entrypoint);

	/* Write magic number in backup register */
	mmio_write_32(bkpr_core1_magic, BOOT_API_A7_CORE1_MAGIC_NUMBER);

	stm32mp_clk_disable(RTCAPB);

	/* Generate an IT to core 1 */
	gicv2_raise_sgi(ARM_IRQ_SEC_SGI_0, STM32MP_SECONDARY_CPU);

	return PSCI_E_SUCCESS;
}
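/*
 * Registration sketch: stm32_pwr_domain_on reaches the generic PSCI layer
 * through the platform's plat_psci_ops. This follows the standard TF-A
 * pattern; handlers other than .pwr_domain_on are elided.
 */
static const plat_psci_ops_t stm32_psci_ops = {
	.pwr_domain_on = stm32_pwr_domain_on,
	/* ... other handlers ... */
};

int plat_setup_psci_ops(uintptr_t sec_entrypoint,
			const plat_psci_ops_t **psci_ops)
{
	stm32_sec_entrypoint = sec_entrypoint;
	*psci_ops = &stm32_psci_ops;

	return 0;
}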
static void hikey_pwr_domain_suspend_finish(const psci_power_state_t *target_state)
{
	unsigned long mpidr;
	unsigned int cluster, cpu;

	/* Nothing to be done on waking up from retention from CPU level */
	if (CORE_PWR_STATE(target_state) != PLAT_MAX_OFF_STATE)
		return;

	/* Get the mpidr for this cpu */
	mpidr = read_mpidr_el1();
	cluster = (mpidr & MPIDR_CLUSTER_MASK) >> MPIDR_AFF1_SHIFT;
	cpu = mpidr & MPIDR_CPU_MASK;

	/* Enable CCI coherency for cluster */
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(mpidr));

	hisi_pwrc_set_core_bx_addr(cpu, cluster, 0);

	if (SYSTEM_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE) {
		gicv2_distif_init();
		gicv2_pcpu_distif_init();
		gicv2_cpuif_enable();
	} else {
		gicv2_pcpu_distif_init();
		gicv2_cpuif_enable();
	}
}
/*******************************************************************************
 * Given a secure payload entrypoint, register width, cpu id & pointer to a
 * context data structure, this function will create a secure context ready for
 * programming an entry into the secure payload.
 ******************************************************************************/
void tlkd_init_tlk_ep_state(struct entry_point_info *tlk_entry_point,
			    uint32_t rw,
			    uint64_t pc,
			    tlk_context_t *tlk_ctx)
{
	uint32_t ep_attr, spsr;

	/* Passing a NULL context is a critical programming error */
	assert(tlk_ctx);
	assert(tlk_entry_point);
	assert(pc);

	/* Associate this context with the cpu specified */
	tlk_ctx->mpidr = read_mpidr_el1();
	clr_std_smc_active_flag(tlk_ctx->state);
	cm_set_context(&tlk_ctx->cpu_ctx, SECURE);

	if (rw == SP_AARCH64)
		spsr = SPSR_64(MODE_EL1, MODE_SP_ELX, DISABLE_ALL_EXCEPTIONS);
	else
		spsr = SPSR_MODE32(MODE32_svc,
				   SPSR_T_ARM,
				   read_sctlr_el3() & SCTLR_EE_BIT,
				   DISABLE_ALL_EXCEPTIONS);

	/* initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(tlk_entry_point, PARAM_EP, VERSION_1, ep_attr);

	tlk_entry_point->pc = pc;
	tlk_entry_point->spsr = spsr;
}
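/*
 * Usage sketch (a hypothetical condensation of the dispatcher setup path):
 * build the entry point state, then initialise the Secure context from it
 * before the first entry into the payload. bl32_ep_pc is an assumed
 * boot-time value.
 */
static void tlkd_setup_sketch(uint64_t bl32_ep_pc, tlk_context_t *tlk_ctx)
{
	entry_point_info_t tlk_entry_point;

	tlkd_init_tlk_ep_state(&tlk_entry_point, SP_AARCH64,
			       bl32_ep_pc, tlk_ctx);
	cm_init_my_context(&tlk_entry_point);
}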
/*******************************************************************************
 * The following functions finish an earlier affinity power on request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_on_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been explicitly woken up by another cpu */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_ON_PENDING);

	/*
	 * Plat. management: Perform the platform specific actions
	 * for this cpu e.g. enabling the gic or zeroing the mailbox
	 * register. The actual state of this cpu has already been
	 * changed.
	 */
	if (psci_plat_pm_ops->affinst_on_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_on_finish(read_mpidr_el1(),
							 cpu_node->level,
							 plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Enable data cache and manage stack memory
	 */
	psci_do_pwrup_cache_maintenance();

	/*
	 * All the platform specific actions for turning this cpu
	 * on have completed. Perform enough arch. initialization
	 * to run in the non-secure address space.
	 */
	bl31_arch_setup();

	/*
	 * Call the cpu on finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert from within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_on_finish)
		psci_spd_pm->svc_on_finish(0);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the cpu_on
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}
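/*
 * The per-level handlers above are gathered into a table indexed by
 * affinity level, which the common finisher in psci_common.c walks from
 * the cpu level upwards (sketch of the dispatch table;
 * afflvl_power_on_finisher_t is the function-pointer type it expects).
 */
const afflvl_power_on_finisher_t psci_afflvl_on_finishers[] = {
	psci_afflvl0_on_finish,
	psci_afflvl1_on_finish,
	psci_afflvl2_on_finish,
};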
/*******************************************************************************
 * The following function provides a compatibility function for SPDs using the
 * existing cm library routines. This function is expected to be invoked for
 * initializing the cpu_context for the CPU specified by MPIDR for first use.
 ******************************************************************************/
void cm_init_context(unsigned long mpidr, const entry_point_info_t *ep)
{
	if ((mpidr & MPIDR_AFFINITY_MASK) ==
			(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
		cm_init_my_context(ep);
	else
		cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
}
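/*
 * Usage sketch: an SPD bringing up another CPU's Non-secure context would
 * pass the target mpidr plus a populated entry point; targeting the
 * current CPU routes to cm_init_my_context() as shown above.
 */
static void init_ns_context_sketch(unsigned long target_mpidr,
				   const entry_point_info_t *ns_ep)
{
	cm_init_context(target_mpidr, ns_ep);
}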
static void
hikey960_pwr_domain_on_finish(const psci_power_state_t *target_state)
{
	if (CLUSTER_PWR_STATE(target_state) == PLAT_MAX_OFF_STATE)
		cci_enable_snoop_dvm_reqs(MPIDR_AFFLVL1_VAL(read_mpidr_el1()));

	gicv2_pcpu_distif_init();
	gicv2_cpuif_enable();
}
static void nonboot_cpus_off(void)
{
	uint32_t boot_cpu, boot_cluster, cpu;

	boot_cpu = MPIDR_AFFLVL0_VAL(read_mpidr_el1());
	boot_cluster = MPIDR_AFFLVL1_VAL(read_mpidr_el1());

	/* turn off non-boot cpus */
	for (cpu = 0; cpu < PLATFORM_CLUSTER0_CORE_COUNT; cpu++) {
		if (!boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(0, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}

	for (cpu = 0; cpu < PLATFORM_CLUSTER1_CORE_COUNT; cpu++) {
		if (boot_cluster && (cpu == boot_cpu))
			continue;
		cpus_id_power_domain(1, cpu, pmu_pd_off, CKECK_WFEI_MSK);
	}
}
/* RAS functions common to AArch64 ARM platforms */
void plat_ea_handler(unsigned int ea_reason, uint64_t syndrome, void *cookie,
		void *handle, uint64_t flags)
{
#if RAS_EXTENSION
	/* Call RAS EA handler */
	int handled = ras_ea_handler(ea_reason, syndrome, cookie, handle, flags);
	if (handled != 0)
		return;
#endif

	ERROR("Unhandled External Abort received on 0x%lx at EL3!\n",
			read_mpidr_el1());
	ERROR(" exception reason=%u syndrome=0x%llx\n", ea_reason, syndrome);
	panic();
}
/*******************************************************************************
 * The following function provides a compatibility function for SPDs using the
 * existing cm library routines. This function is expected to be invoked for
 * initializing the cpu_context for the CPU specified by MPIDR for first use.
 ******************************************************************************/
void cm_init_context(uint64_t mpidr, const entry_point_info_t *ep)
{
	if ((mpidr & MPIDR_AFFINITY_MASK) ==
			(read_mpidr_el1() & MPIDR_AFFINITY_MASK))
		cm_init_my_context(ep);
	else {
		/*
		 * Suppress deprecated declaration warning in compatibility
		 * function
		 */
#pragma GCC diagnostic push
#pragma GCC diagnostic ignored "-Wdeprecated-declarations"
		cm_init_context_by_index(platform_get_core_pos(mpidr), ep);
#pragma GCC diagnostic pop
	}
}
/*******************************************************************************
 * STM32MP1 handler called when a power domain is about to be turned on. The
 * mpidr determines the CPU to be turned on.
 * Called by core 0 to activate core 1.
 ******************************************************************************/
static int stm32_pwr_domain_on(u_register_t mpidr)
{
	unsigned long current_cpu_mpidr = read_mpidr_el1();
	uint32_t tamp_clk_off = 0;
	uint32_t bkpr_core1_addr =
		tamp_bkpr(BOOT_API_CORE1_BRANCH_ADDRESS_TAMP_BCK_REG_IDX);
	uint32_t bkpr_core1_magic =
		tamp_bkpr(BOOT_API_CORE1_MAGIC_NUMBER_TAMP_BCK_REG_IDX);

	if (mpidr == current_cpu_mpidr) {
		return PSCI_E_INVALID_PARAMS;
	}

	if ((stm32_sec_entrypoint < STM32MP1_SRAM_BASE) ||
	    (stm32_sec_entrypoint > (STM32MP1_SRAM_BASE +
				     (STM32MP1_SRAM_SIZE - 1)))) {
		return PSCI_E_INVALID_ADDRESS;
	}

	if (!stm32mp1_clk_is_enabled(RTCAPB)) {
		tamp_clk_off = 1;
		if (stm32mp1_clk_enable(RTCAPB) != 0) {
			panic();
		}
	}

	cntfrq_core0 = read_cntfrq_el0();

	/* Write entrypoint in backup RAM register */
	mmio_write_32(bkpr_core1_addr, stm32_sec_entrypoint);

	/* Write magic number in backup register */
	mmio_write_32(bkpr_core1_magic, BOOT_API_A7_CORE1_MAGIC_NUMBER);

	if (tamp_clk_off != 0U) {
		if (stm32mp1_clk_disable(RTCAPB) != 0) {
			panic();
		}
	}

	/* Generate an IT to core 1 */
	mmio_write_32(STM32MP1_GICD_BASE + GICD_SGIR,
		      SEND_SECURE_IT_TO_CORE_1 | ARM_IRQ_SEC_SGI_0);

	return PSCI_E_SUCCESS;
}
void plat_rockchip_pmu_init(void)
{
	uint32_t cpu;

	plat_setup_rockchip_pm_ops(&pm_ops);

	for (cpu = 0; cpu < PLATFORM_CORE_COUNT; cpu++)
		cpuson_flags[cpu] = 0;

	psram_sleep_cfg->sys_mode = PMU_SYS_ON_MODE;

	psram_sleep_cfg->boot_mpidr = read_mpidr_el1() & 0xffff;

	nonboot_cpus_off();
	INFO("%s(%d): pd status %x\n", __func__, __LINE__,
	     mmio_read_32(PMU_BASE + PMU_PWRDN_ST));
}
static int psci_afflvl2_suspend(aff_map_node_t *system_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;
	int rc;

	/* Cannot go beyond this */
	assert(system_node->level == MPIDR_AFFLVL2);

	/*
	 * Keep the physical state of the system handy to decide what
	 * action needs to be taken
	 */
	plat_state = psci_get_phys_state(system_node);

	/*
	 * Plat. Management : Allow the platform to do its bookkeeping
	 * at this affinity level
	 */
	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Sending the psci entrypoint is currently redundant
	 * beyond affinity level 0 but one never knows what a
	 * platform might do. Also it allows us to keep the
	 * platform handler prototype the same.
	 */
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						 psci_entrypoint,
						 ns_entrypoint,
						 system_node->level,
						 plat_state);
	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * system is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL2);

	return rc;
}
/*******************************************************************************
 * FVP handler called when an affinity instance is about to be suspended. The
 * level and mpidr determine the affinity instance. The 'state' arg. allows the
 * platform to decide whether the cluster is being turned off and take apt
 * actions.
 *
 * CAUTION: There is no guarantee that caches will remain turned on across calls
 * to this function as each affinity level is dealt with. So do not write and
 * then read global variables across calls. It is wise to flush any write to
 * a global to prevent unpredictable results.
 ******************************************************************************/
int plat_affinst_suspend(unsigned long mpidr,
			unsigned long sec_entrypoint,
			unsigned long ns_entrypoint,
			unsigned int afflvl,
			unsigned int state)
{
	unsigned int ectlr;
	/* Determine if any platform actions need to be executed. */
	if (plat_do_plat_actions(afflvl, state) == -EAGAIN)
		return PSCI_E_SUCCESS;

	/* set cpu0 as AArch64 for cpu reset */
	mmio_write_32(MP0_MISC_CONFIG3, mmio_read_32(MP0_MISC_CONFIG3) | (1<<12));

	ectlr = read_cpuectlr();
	ectlr &= ~CPUECTLR_SMP_BIT;
	write_cpuectlr(ectlr);

	/* Program the jump address for the target cpu */
	plat_program_mailbox(read_mpidr_el1(), sec_entrypoint);

	/* Program the power controller to enable wakeup interrupts. */
	// plat_pwrc_set_wen(mpidr);

	/* Perform the common cpu specific operations */
	// plat_cpu_pwrdwn_common();
	gic_cpuif_deactivate(get_plat_config()->gicc_base);

	/* Perform the common cluster specific operations */
	if (afflvl >= MPIDR_AFFLVL1) {
		// plat_cluster_pwrdwn_common();
		if (get_plat_config()->flags & CONFIG_HAS_CCI)
			cci_disable_cluster_coherency(mpidr);

		disable_scu(mpidr);
	}
	if (afflvl >= MPIDR_AFFLVL2) {
		plat_save_el3_dormant_data();
		generic_timer_backup();
		gic_dist_save();
	}

	return PSCI_E_SUCCESS;
}
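/*
 * Per the CAUTION above, a write to a global made while caches may be off
 * must be cleaned to the Point of Coherency before a later call reads it.
 * A minimal sketch, assuming a hypothetical global g_dormant_flag:
 */
static uint32_t g_dormant_flag;

static void set_dormant_flag(uint32_t val)
{
	g_dormant_flag = val;
	flush_dcache_range((uintptr_t)&g_dormant_flag,
			   sizeof(g_dormant_flag));
}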
/*******************************************************************************
 * The next three functions implement a handler for each supported affinity
 * level which is called when that affinity level is turned off.
 ******************************************************************************/
static int psci_afflvl0_off(aff_map_node_t *cpu_node)
{
	int rc;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/*
	 * Generic management: Get the index for clearing any lingering re-entry
	 * information and allow the secure world to switch itself off
	 */

	/*
	 * Call the cpu off handler registered by the Secure Payload Dispatcher
	 * to let it do any bookkeeping. Assume that the SPD always reports an
	 * E_DENIED error if the SP refuses to power down.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_off) {
		rc = psci_spd_pm->svc_off(0);
		if (rc)
			return rc;
	}

	if (!psci_plat_pm_ops->affinst_off)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. management: Perform platform specific actions to turn this
	 * cpu off e.g. exit cpu coherency, program the power controller etc.
	 */
	rc = psci_plat_pm_ops->affinst_off(read_mpidr_el1(),
					     cpu_node->level,
					     psci_get_phys_state(cpu_node));
	/*
	 * Arch. management. Perform the necessary steps to flush all
	 * cpu caches.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL0);

	return rc;
}
/*******************************************************************************
 * Given a secure payload entrypoint info pointer, entry point PC, register
 * width, cpu id & pointer to a context data structure, this function will
 * initialize the TSP context and entry point info for the secure payload.
 ******************************************************************************/
void tspd_init_tsp_ep_state(struct entry_point_info *tsp_entry_point,
				uint32_t rw,
				uint64_t pc,
				tsp_context_t *tsp_ctx)
{
	uint32_t ep_attr;

	/* Passing a NULL context is a critical programming error */
	assert(tsp_ctx);
	assert(tsp_entry_point);
	assert(pc);

	/*
	 * We support AArch64 TSP for now.
	 * TODO: Add support for AArch32 TSP
	 */
	assert(rw == TSP_AARCH64);

	/* Associate this context with the cpu specified */
	tsp_ctx->mpidr = read_mpidr_el1();
	tsp_ctx->state = 0;
	set_tsp_pstate(tsp_ctx->state, TSP_PSTATE_OFF);
	clr_yield_smc_active_flag(tsp_ctx->state);

	cm_set_context(&tsp_ctx->cpu_ctx, SECURE);

	/* initialise an entrypoint to set up the CPU context */
	ep_attr = SECURE | EP_ST_ENABLE;
	if (read_sctlr_el3() & SCTLR_EE_BIT)
		ep_attr |= EP_EE_BIG;
	SET_PARAM_HEAD(tsp_entry_point, PARAM_EP, VERSION_1, ep_attr);

	tsp_entry_point->pc = pc;
	tsp_entry_point->spsr = SPSR_64(MODE_EL1,
					MODE_SP_ELX,
					DISABLE_ALL_EXCEPTIONS);
	zeromem(&tsp_entry_point->args, sizeof(tsp_entry_point->args));
}
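/*
 * Usage sketch (a hypothetical condensation of the tspd setup path): the
 * dispatcher initialises the entry point state for this CPU and then
 * builds the Secure context from it. tsp_entry_pc is an assumed
 * boot-time value.
 */
static void tspd_setup_sketch(uint64_t tsp_entry_pc, tsp_context_t *tsp_ctx)
{
	entry_point_info_t tsp_ep;

	tspd_init_tsp_ep_state(&tsp_ep, TSP_AARCH64, tsp_entry_pc, tsp_ctx);
	cm_init_my_context(&tsp_ep);
}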
static int psci_afflvl1_suspend(aff_map_node_t *cluster_node,
				unsigned long ns_entrypoint,
				unsigned long context_id,
				unsigned int power_state)
{
	unsigned int plat_state;
	unsigned long psci_entrypoint;
	int rc;

	/* Sanity check the cluster level */
	assert(cluster_node->level == MPIDR_AFFLVL1);

	if (!psci_plat_pm_ops->affinst_suspend)
		return PSCI_E_SUCCESS;

	/*
	 * Plat. Management. Allow the platform to do its cluster specific
	 * bookkeeping e.g. turn off interconnect coherency, program the power
	 * controller etc. Sending the psci entrypoint is currently redundant
	 * beyond affinity level 0 but one never knows what a platform might
	 * do. Also it allows us to keep the platform handler prototype the
	 * same.
	 */
	plat_state = psci_get_phys_state(cluster_node);
	psci_entrypoint = (unsigned long) psci_aff_suspend_finish_entry;
	rc = psci_plat_pm_ops->affinst_suspend(read_mpidr_el1(),
						 psci_entrypoint,
						 ns_entrypoint,
						 cluster_node->level,
						 plat_state);
	/*
	 * Arch. management: Flush all levels of caches to PoC if the
	 * cluster is to be shutdown.
	 */
	psci_do_pwrdown_cache_maintenance(MPIDR_AFFLVL1);

	return rc;
}
/*******************************************************************************
 * The following functions finish an earlier affinity suspend request. They
 * are called by the common finisher routine in psci_common.c.
 ******************************************************************************/
static unsigned int psci_afflvl0_suspend_finish(aff_map_node_t *cpu_node)
{
	unsigned int plat_state, state, rc;
	int32_t suspend_level;
	uint64_t counter_freq;

	assert(cpu_node->level == MPIDR_AFFLVL0);

	/* Ensure we have been woken up from a suspended state */
	state = psci_get_state(cpu_node);
	assert(state == PSCI_STATE_SUSPEND);

	/*
	 * Plat. management: Perform the platform specific actions
	 * before we change the state of the cpu e.g. enabling the
	 * gic or zeroing the mailbox register. If anything goes
	 * wrong then assert as there is no way to recover from this
	 * situation.
	 */
	if (psci_plat_pm_ops->affinst_suspend_finish) {

		/* Get the physical state of this cpu */
		plat_state = get_phys_state(state);
		rc = psci_plat_pm_ops->affinst_suspend_finish(read_mpidr_el1(),
							      cpu_node->level,
							      plat_state);
		assert(rc == PSCI_E_SUCCESS);
	}

	/*
	 * Arch. management: Enable the data cache, manage stack memory and
	 * restore the stashed EL3 architectural context from the 'cpu_context'
	 * structure for this cpu.
	 */
	psci_do_pwrup_cache_maintenance();

	/* Re-init the cntfrq_el0 register */
	counter_freq = plat_get_syscnt_freq();
	write_cntfrq_el0(counter_freq);

	/*
	 * Call the cpu suspend finish handler registered by the Secure Payload
	 * Dispatcher to let it do any bookkeeping. If the handler encounters
	 * an error, it is expected to assert from within.
	 */
	if (psci_spd_pm && psci_spd_pm->svc_suspend_finish) {
		suspend_level = psci_get_suspend_afflvl();
		assert(suspend_level != PSCI_INVALID_DATA);
		psci_spd_pm->svc_suspend_finish(suspend_level);
	}

	/* Invalidate the suspend context for the node */
	psci_set_suspend_power_state(PSCI_INVALID_DATA);

	/*
	 * Generic management: Now we just need to retrieve the
	 * information that we had stashed away during the suspend
	 * call to set this cpu on its way.
	 */
	cm_prepare_el3_exit(NON_SECURE);

	/* Clean caches before re-entering normal world */
	dcsw_op_louis(DCCSW);

	rc = PSCI_E_SUCCESS;
	return rc;
}
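/*
 * As with the "on" finishers, the per-level suspend finishers are gathered
 * into a table indexed by affinity level for the common finisher routine
 * (sketch of the dispatch table).
 */
const afflvl_power_on_finisher_t psci_afflvl_suspend_finishers[] = {
	psci_afflvl0_suspend_finish,
	psci_afflvl1_suspend_finish,
	psci_afflvl2_suspend_finish,
};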